Dataset schema (one row = a code sample, its codestyle id, a style-context sample, its codestyle id, and a binary label; the numeric values appear below each sample in that order):

    code                     string   lengths 86 to 54.5k
    code_codestyle           int64    0 to 371
    style_context            string   lengths 87 to 49.2k
    style_context_codestyle  int64    0 to 349
    label                    int64    0 to 1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
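A minimal usage sketch for the config above (assuming this is the configuration_lxmert module shipped with transformers, so the class is importable from the installed package):

# Sanity check on the defaults defined above.
from transformers import LxmertConfig

config = LxmertConfig()  # all defaults
# num_hidden_layers is a dict keyed by encoder: language (9), cross_encoder (5), vision (5)
assert config.num_hidden_layers == {"vision": 5, "cross_encoder": 5, "language": 9}
assert config.visual_feat_dim == 2048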
code_codestyle: 315
def solution(length: int = 50) -> int:
    """Count the ways a row of the given length can be filled with blocks of
    minimum length 3, separated by at least one empty unit."""
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
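The small cases from the problem statement (this appears to be Project Euler 114) make a cheap self-test, assuming the function above is in scope as solution:

# Known small cases: a row of length 3 admits 2 fillings, length 7 admits 17.
assert solution(3) == 2
assert solution(7) == 17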
style_context_codestyle: 315
label: 1
from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) __UpperCAmelCase : str = _symbol_database.Default() __UpperCAmelCase : Union[str, Any] = _descriptor_pool.Default().AddSerializedFile( B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03" ) __UpperCAmelCase : Dict = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) if _descriptor._USE_C_DESCRIPTORS is False: __UpperCAmelCase : Any = None __UpperCAmelCase : str = B"H\003" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" __UpperCAmelCase : Optional[Any] = 45 __UpperCAmelCase : Optional[int] = 1581 __UpperCAmelCase : str = 1517 __UpperCAmelCase : Union[str, Any] = 1570 __UpperCAmelCase : List[str] = 1584 __UpperCAmelCase : Any = 1793 __UpperCAmelCase : Union[str, Any] = 1795 __UpperCAmelCase : Union[str, Any] = 1916 __UpperCAmelCase : str = 1864 __UpperCAmelCase : Union[str, Any] = 1905 __UpperCAmelCase : Tuple = 1919 __UpperCAmelCase : List[Any] = 2429 __UpperCAmelCase : List[str] = 2208 __UpperCAmelCase : Any = 2418 __UpperCAmelCase : Any = 2323 __UpperCAmelCase : Tuple = 2407 # @@protoc_insertion_point(module_scope)
code_codestyle: 315
import math
import time

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
style_context_codestyle: 315
label: 1
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
code_codestyle: 315
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return a word's letters sorted, used as an anagram key."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every anagram of the given word."""
    return word_by_signature[signature(my_word)]


data: str = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
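Because signature() is just the word's letters sorted, any two anagrams collide on the same dictionary key; a tiny illustration with hypothetical words (not necessarily in words.txt):

# "race" and "care" sort to the same signature and land in one bucket.
assert signature("race") == signature("care") == "acer"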
style_context_codestyle: 315
label: 1
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate an expression in reverse Polish (postfix) notation."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncating toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
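Worked examples, assuming the evaluator above is in scope as evaluate_postfix:

# (2 + 1) * 3 = 9, written in postfix as: 2 1 + 3 *
assert evaluate_postfix(["2", "1", "+", "3", "*"]) == 9
# division truncates toward zero for mixed signs: -5 / 2 -> -2
assert evaluate_postfix(["-5", "2", "/"]) == -2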
code_codestyle: 315
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose row-organized data into column-organized lists."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalize each column; weight 0 inverts the score."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores for each row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
style_context_codestyle: 315
label: 1
from __future__ import annotations


def encode(plain: str) -> list[int]:
    """Encode a lowercase string as a list of integers (a=1, b=2, ...)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Decode a list of integers back into a lowercase string."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
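A round trip through the two helpers is the natural sanity check (lowercase input assumed, since the cipher only covers a-z):

assert encode("abc") == [1, 2, 3]          # a=1, b=2, c=3
assert decode(encode("hello")) == "hello"  # lossless round trip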
code_codestyle: 315
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str):
    """Read YOLO-style label files and pair them with their images."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1):
    """Flip each image and mirror its bounding-box centers accordingly."""
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """Generate a random string of lowercase letters and digits."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
style_context_codestyle: 315
label: 1
def sum_of_digits(n: int) -> int:
    """Find the sum of digits of a number iteratively."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Find the sum of digits of a number using recursion."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Find the sum of digits of a number via string conversion."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations on ints of increasing size."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
code_codestyle: 315
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum half adder for two classical input bits."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
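With both inputs set to 1, XOR is 0 and AND is 1; since the pre-measurement state is a computational-basis state, the noiseless simulator is deterministic and every one of the 1,000 shots should read '10' (AND in the high bit):

# Expected counts for half_adder(1, 1) on the noiseless simulator.
counts = half_adder(1, 1)
assert counts == {"10": 1000}  # qubit3 (AND) = 1, qubit2 (XOR) = 0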
style_context_codestyle: 315
label: 1
import os
import time

import numpy as np
import onnxruntime as ort


# NOTE: the obfuscated source dropped the left-hand sides of these assignments;
# the env-var names below are an assumption based on the upstream TensorRT ORT
# benchmark script, which sets the values "1", "0", "1" in this order.
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"

sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
predict = {}
for iter in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
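Disabling ONNX Runtime's own graph optimizations (ORT_DISABLE_ALL) alongside the TensorrtExecutionProvider is presumably deliberate: leaving the graph unfused lets TensorRT perform its own operator fusion and kernel selection, so the benchmark measures TensorRT rather than ORT's rewrites.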
code_codestyle: 315
import functools
from typing import Any


def word_break(string: str, words: list[str]) -> bool:
    """Return True if the string can be segmented into a sequence of the given words."""
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
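Two classic word-break instances, assuming the function above is in scope:

assert word_break("applepenapple", ["apple", "pen"]) is True
assert word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False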
code_codestyle: 315
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
style_context_codestyle: 315
label: 1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
code_codestyle: 315
def to_upper_case(word: str) -> str:
    """Convert the ASCII lowercase letters in a string to uppercase."""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
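A couple of spot checks (ASCII-only input assumed, since the helper shifts code points by 32):

assert to_upper_case("hello world") == "HELLO WORLD"
assert to_upper_case("MiXeD 123!") == "MIXED 123!"  # non-letters pass through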
style_context_codestyle: 315
label: 1
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
code_codestyle: 315
import math


def is_prime(number: int) -> bool:
    """Check whether a number is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the next prime at or above factor * value (or below, with desc=True)."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
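Spot checks, assuming the two helpers above are in scope:

assert is_prime(97) is True
assert is_prime(1) is False
assert next_prime(14) == 17  # scans upward from 14 to the next prime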
style_context_codestyle: 315
label: 1
import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
code_codestyle: 315
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
style_context_codestyle: 315
label: 1
import os


def solution(filename: str = "input.txt") -> int:
    """Find the minimal path sum from the left column to the right column of the matrix."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # move right from the previous column
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        # relax downward moves
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        # relax upward moves
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
code_codestyle: 315
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=18 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , ): """simple docstring""" UpperCamelCase : List[str] = size if size is not None else {'''height''': 18, '''width''': 18} UpperCamelCase : int = parent UpperCamelCase : List[Any] = batch_size UpperCamelCase : Optional[int] = num_channels UpperCamelCase : Union[str, Any] = image_size UpperCamelCase : Union[str, Any] = min_resolution UpperCamelCase : Tuple = max_resolution UpperCamelCase : List[str] = do_resize UpperCamelCase : List[str] = size UpperCamelCase : int = apply_ocr def _lowercase ( self ): """simple docstring""" return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class UpperCAmelCase_ ( _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = LayoutLMvaImageProcessingTester(self ) @property def _lowercase ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''apply_ocr''' ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} ) UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) def _lowercase ( self ): """simple docstring""" pass def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input UpperCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) self.assertIsInstance(encoding.words , __SCREAMING_SNAKE_CASE ) self.assertIsInstance(encoding.boxes , __SCREAMING_SNAKE_CASE ) # Test batched UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values 
self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input UpperCamelCase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched UpperCamelCase : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input UpperCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched UpperCamelCase : Optional[int] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = LayoutLMvaImageProcessor() from datasets import load_dataset UpperCamelCase : Dict = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' ) UpperCamelCase : List[Any] = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 UpperCamelCase : Union[str, Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', 
'''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 UpperCamelCase : str = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 
391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , __SCREAMING_SNAKE_CASE ) self.assertListEqual(encoding.boxes , __SCREAMING_SNAKE_CASE ) # with apply_OCR = False UpperCamelCase : Optional[Any] = LayoutLMvaImageProcessor(apply_ocr=__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
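# A hedged usage sketch for the image processor exercised by the tests above.
# The public transformers name LayoutLMv3ImageProcessor and the local file
# "document.png" are assumptions (identifiers in this dump are obfuscated);
# with apply_ocr=True the processor additionally runs Tesseract and returns
# per-image words plus their 0-1000 normalized bounding boxes.
# from PIL import Image
# from transformers import LayoutLMv3ImageProcessor
# processor = LayoutLMv3ImageProcessor(apply_ocr=True)
# image = Image.open("document.png").convert("RGB")
# encoding = processor(image, return_tensors="pt")
# encoding.pixel_values.shape   # torch.Size([1, 3, 224, 224])
# encoding.words, encoding.boxes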
315
1
import os from collections.abc import Iterator def a ( SCREAMING_SNAKE_CASE_ : str = "." ): """simple docstring""" for dir_path, dir_names, filenames in os.walk(SCREAMING_SNAKE_CASE_ ): UpperCamelCase : str = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._'''] for filename in filenames: if filename == "__init__.py": continue if os.path.splitext(SCREAMING_SNAKE_CASE_ )[1] in (".py", ".ipynb"): yield os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).lstrip('''./''' ) def a ( SCREAMING_SNAKE_CASE_ : Tuple ): """simple docstring""" return F"""{i * " "}*""" if i else "\n##" def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ): """simple docstring""" UpperCamelCase : Dict = old_path.split(os.sep ) for i, new_part in enumerate(new_path.split(os.sep ) ): if (i + 1 > len(SCREAMING_SNAKE_CASE_ ) or old_parts[i] != new_part) and new_part: print(F"""{md_prefix(SCREAMING_SNAKE_CASE_ )} {new_part.replace("_" , " " ).title()}""" ) return new_path def a ( SCREAMING_SNAKE_CASE_ : str = "." ): """simple docstring""" UpperCamelCase : Optional[Any] = '''''' for filepath in sorted(good_file_paths(SCREAMING_SNAKE_CASE_ ) ): UpperCamelCase , UpperCamelCase : Optional[int] = os.path.split(SCREAMING_SNAKE_CASE_ ) if filepath != old_path: UpperCamelCase : Any = print_path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase : int = (filepath.count(os.sep ) + 1) if filepath else 0 UpperCamelCase : Union[str, Any] = F"""{filepath}/{filename}""".replace(''' ''' , '''%20''' ) UpperCamelCase : Dict = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0] print(F"""{md_prefix(SCREAMING_SNAKE_CASE_ )} [{filename}]({url})""" ) if __name__ == "__main__": print_directory_md(".")
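# Worked example for the generator above, using its original helper names as
# an assumption about this obfuscated dump: md_prefix(0) yields "\n##", so
# each new top-level directory opens a markdown section, while md_prefix(i > 0)
# yields an indented "*" bullet. A file "ciphers/caesar_cipher.py" is thus
# printed as "* [Caesar Cipher](ciphers/caesar_cipher.py)" under a
# "## Ciphers" heading, with spaces in paths percent-encoded as %20 so the
# markdown links stay valid.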
315
import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def a ( SCREAMING_SNAKE_CASE_ : dict ): """simple docstring""" return (data["data"], data["target"]) def a ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray ): """simple docstring""" UpperCamelCase : Optional[Any] = XGBRegressor(verbosity=0 , random_state=4_2 ) xgb.fit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Predict target for test data UpperCamelCase : Any = xgb.predict(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = predictions.reshape(len(SCREAMING_SNAKE_CASE_ ) , 1 ) return predictions def a ( ): """simple docstring""" UpperCamelCase : Tuple = fetch_california_housing() UpperCamelCase , UpperCamelCase : Tuple = data_handling(SCREAMING_SNAKE_CASE_ ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = train_test_split( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , test_size=0.25 , random_state=1 ) UpperCamelCase : Optional[Any] = xgboost(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Error printing print(F"""Mean Absolute Error : {mean_absolute_error(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}""" ) print(F"""Mean Square Error : {mean_squared_error(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}""" ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
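# The same flow on synthetic data, as a minimal sketch that makes the shapes
# explicit without fetching California Housing; XGBRegressor and
# train_test_split are the same public APIs used above, and the random linear
# target is purely illustrative.
import numpy as np
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor

rng = np.random.default_rng(0)
X_demo = rng.normal(size=(200, 8))          # 200 samples, 8 features
y_demo = X_demo @ rng.normal(size=8)        # noiseless linear target
X_tr, X_te, y_tr, y_te = train_test_split(X_demo, y_demo, test_size=0.25, random_state=1)
demo_reg = XGBRegressor(verbosity=0, random_state=42).fit(X_tr, y_tr)
print(demo_reg.predict(X_te).reshape(len(X_te), 1).shape)   # (50, 1), matching the reshape above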
315
1
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def a ( SCREAMING_SNAKE_CASE_ : Optional[int] ): """simple docstring""" UpperCamelCase , UpperCamelCase : Any = image.size UpperCamelCase , UpperCamelCase : Optional[int] = (x - x % 3_2 for x in (w, h)) # resize to integer multiple of 32 UpperCamelCase : Union[str, Any] = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) UpperCamelCase : List[Any] = np.array(SCREAMING_SNAKE_CASE_ ).astype(np.floataa ) / 255.0 UpperCamelCase : Union[str, Any] = image[None].transpose(0 , 3 , 1 , 2 ) UpperCamelCase : List[str] = torch.from_numpy(SCREAMING_SNAKE_CASE_ ) return 2.0 * image - 1.0 class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ): """simple docstring""" super().__init__() self.register_modules(vqvae=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) @torch.no_grad() def __call__( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = 100 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , ): """simple docstring""" if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image ): UpperCamelCase : Optional[int] = 1 elif isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor ): UpperCamelCase : Optional[int] = image.shape[0] else: raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__SCREAMING_SNAKE_CASE )}""" ) if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image ): UpperCamelCase : Tuple = preprocess(__SCREAMING_SNAKE_CASE ) UpperCamelCase , UpperCamelCase : Optional[int] = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image UpperCamelCase : Dict = (batch_size, self.unet.config.in_channels // 2, height, width) UpperCamelCase : List[Any] = next(self.unet.parameters() ).dtype UpperCamelCase : Tuple = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = image.to(device=self.device , dtype=__SCREAMING_SNAKE_CASE ) # set timesteps and move to the correct device self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , device=self.device ) UpperCamelCase : Union[str, Any] = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler UpperCamelCase : List[Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCamelCase : Optional[Any] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCamelCase : Tuple = {} if accepts_eta: UpperCamelCase : Tuple = eta for t in self.progress_bar(__SCREAMING_SNAKE_CASE ): # concat latents and low resolution image in the channel dimension. 
UpperCamelCase : int = torch.cat([latents, image] , dim=1 ) UpperCamelCase : Optional[Any] = self.scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # predict the noise residual UpperCamelCase : int = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).sample # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase : Optional[Any] = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample # decode the image latents with the VQVAE UpperCamelCase : Optional[Any] = self.vqvae.decode(__SCREAMING_SNAKE_CASE ).sample UpperCamelCase : Any = torch.clamp(__SCREAMING_SNAKE_CASE , -1.0 , 1.0 ) UpperCamelCase : Tuple = image / 2 + 0.5 UpperCamelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCamelCase : List[Any] = self.numpy_to_pil(__SCREAMING_SNAKE_CASE ) if not return_dict: return (image,) return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
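# A hedged usage sketch for the pipeline class above. Upstream in diffusers
# this is LDMSuperResolutionPipeline, normally paired with the
# "CompVis/ldm-super-resolution-4x-openimages" checkpoint; both names are
# assumptions here, since this dump obfuscates identifiers, and the 128x128
# input size is only illustrative.
# from PIL import Image
# from diffusers import LDMSuperResolutionPipeline
# pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
# low_res = Image.open("low_res.png").convert("RGB").resize((128, 128))
# upscaled = pipe(image=low_res, num_inference_steps=100, eta=1.0).images[0]
# upscaled.save("upscaled_4x.png")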
315
__UpperCAmelCase : str = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n" __UpperCAmelCase : Dict = [{"type": "code", "content": INSTALL_CONTENT}] __UpperCAmelCase : Union[str, Any] = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
315
1
from collections.abc import Callable import numpy as np def a ( SCREAMING_SNAKE_CASE_ : Callable , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ): """simple docstring""" UpperCamelCase : Dict = int(np.ceil((x_end - xa) / step_size ) ) UpperCamelCase : Optional[int] = np.zeros((n + 1,) ) UpperCamelCase : Optional[Any] = ya UpperCamelCase : List[str] = xa for k in range(SCREAMING_SNAKE_CASE_ ): UpperCamelCase : int = y[k] + step_size * ode_func(SCREAMING_SNAKE_CASE_ , y[k] ) UpperCamelCase : Optional[int] = y[k] + ( (step_size / 2) * (ode_func(SCREAMING_SNAKE_CASE_ , y[k] ) + ode_func(x + step_size , SCREAMING_SNAKE_CASE_ )) ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
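# Worked check for the integrator above (Heun's method / modified Euler,
# second-order accurate). Assuming the upstream TheAlgorithms signature
# euler_modified(ode_func, y0, x0, step_size, x_end), solving dy/dx = y with
# y(0) = 1 up to x = 1 should approach e:
# y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
# y[-1]   # ~2.71828, vs np.e = 2.71828182...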
315
import collections import os import re from pathlib import Path __UpperCAmelCase : List[str] = "src/transformers" # Matches is_xxx_available() __UpperCAmelCase : int = re.compile(r"is\_([a-z_]*)_available()") # Catches a one-line _import_struct = {xxx} __UpperCAmelCase : Optional[int] = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __UpperCAmelCase : List[Any] = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]") # Catches a line if not is_foo_available __UpperCAmelCase : List[Any] = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)") # Catches a line _import_struct["bla"].append("foo") __UpperCAmelCase : str = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __UpperCAmelCase : Union[str, Any] = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]") # Catches a line with an object between quotes and a comma: "MyModel", __UpperCAmelCase : Dict = re.compile(r"^\s+\"([^\"]+)\",") # Catches a line with objects between brackets only: ["foo", "bar"], __UpperCAmelCase : str = re.compile(r"^\s+\[([^\]]+)\]") # Catches a line with from foo import bar, bla, boo __UpperCAmelCase : str = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Catches a line with try: __UpperCAmelCase : Any = re.compile(r"^\s*try:") # Catches a line with else: __UpperCAmelCase : List[Any] = re.compile(r"^\s*else:") def a ( SCREAMING_SNAKE_CASE_ : Dict ): """simple docstring""" if _re_test_backend.search(SCREAMING_SNAKE_CASE_ ) is None: return None UpperCamelCase : Union[str, Any] = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE_ )] backends.sort() return "_and_".join(SCREAMING_SNAKE_CASE_ ) def a ( SCREAMING_SNAKE_CASE_ : Tuple ): """simple docstring""" with open(SCREAMING_SNAKE_CASE_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: UpperCamelCase : Tuple = f.readlines() UpperCamelCase : Tuple = 0 while line_index < len(SCREAMING_SNAKE_CASE_ ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(SCREAMING_SNAKE_CASE_ ): return None # First grab the objects without a specific backend in _import_structure UpperCamelCase : List[Any] = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: UpperCamelCase : Optional[int] = lines[line_index] # If we have everything on a single line, let's deal with it. 
if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Optional[int] = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ).groups()[0] UpperCamelCase : str = re.findall(R'''\[([^\]]+)\]''' , SCREAMING_SNAKE_CASE_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue UpperCamelCase : List[Any] = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE_ ) if single_line_import_search is not None: UpperCamelCase : List[str] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(SCREAMING_SNAKE_CASE_ ) > 0] objects.extend(SCREAMING_SNAKE_CASE_ ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 UpperCamelCase : Dict = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. UpperCamelCase : Dict = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: UpperCamelCase : Optional[Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 UpperCamelCase : str = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): UpperCamelCase : str = lines[line_index] if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ) is not None: objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ).groups()[0] ) elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ) is not None: UpperCamelCase : Union[str, Any] = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(''', ''' ) UpperCamelCase : List[Any] = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0] objects.extend(SCREAMING_SNAKE_CASE_ ) elif _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ) is not None: UpperCamelCase : str = _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(''', ''' ) UpperCamelCase : Dict = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0] objects.extend(SCREAMING_SNAKE_CASE_ ) elif _re_quote_object.search(SCREAMING_SNAKE_CASE_ ) is not None: objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE_ ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 1_2 + '''"''' ): objects.append(line[1_3:-3] ) line_index += 1 UpperCamelCase : Tuple = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend UpperCamelCase : int = [] while ( line_index < len(SCREAMING_SNAKE_CASE_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): UpperCamelCase : Tuple = lines[line_index] UpperCamelCase : Any = _re_import.search(SCREAMING_SNAKE_CASE_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 UpperCamelCase : Any = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(SCREAMING_SNAKE_CASE_ ): # If the line is an if is_backend_available, we grab all objects associated. 
UpperCamelCase : Optional[Any] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: UpperCamelCase : Dict = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 UpperCamelCase : Dict = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): UpperCamelCase : Optional[Any] = lines[line_index] UpperCamelCase : str = _re_import.search(SCREAMING_SNAKE_CASE_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 1_2 ): objects.append(line[1_2:-2] ) line_index += 1 UpperCamelCase : str = objects else: line_index += 1 return import_dict_objects, type_hint_objects def a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ): """simple docstring""" def find_duplicates(SCREAMING_SNAKE_CASE_ : Any ): return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] UpperCamelCase : Dict = [] for key in import_dict_objects.keys(): UpperCamelCase : Union[str, Any] = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) UpperCamelCase : Dict = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): UpperCamelCase : List[str] = '''base imports''' if key == '''none''' else F"""{key} backend""" errors.append(F"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def a ( ): """simple docstring""" UpperCamelCase : Any = [] for root, _, files in os.walk(SCREAMING_SNAKE_CASE_ ): if "__init__.py" in files: UpperCamelCase : int = os.path.join(SCREAMING_SNAKE_CASE_ , '''__init__.py''' ) UpperCamelCase : Optional[int] = parse_init(SCREAMING_SNAKE_CASE_ ) if objects is not None: UpperCamelCase : str = analyze_results(*SCREAMING_SNAKE_CASE_ ) if len(SCREAMING_SNAKE_CASE_ ) > 0: UpperCamelCase : List[Any] = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append('''\n'''.join(SCREAMING_SNAKE_CASE_ ) ) if len(SCREAMING_SNAKE_CASE_ ) > 0: raise ValueError('''\n\n'''.join(SCREAMING_SNAKE_CASE_ ) ) def a ( ): """simple docstring""" UpperCamelCase : Dict = [] for path, directories, files in os.walk(SCREAMING_SNAKE_CASE_ ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(SCREAMING_SNAKE_CASE_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(SCREAMING_SNAKE_CASE_ ) / folder).glob('''*.py''' ) ) ) == 0: continue UpperCamelCase : List[str] = str((Path(SCREAMING_SNAKE_CASE_ ) / folder).relative_to(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase : str = short_path.replace(os.path.sep , 
'''.''' )
                submodules.append(SCREAMING_SNAKE_CASE_ )
        for fname in files:
            if fname == "__init__.py":
                continue
            UpperCamelCase : Tuple = str((Path(SCREAMING_SNAKE_CASE_ ) / fname).relative_to(SCREAMING_SNAKE_CASE_ ) )
            UpperCamelCase : int = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
            if len(submodule.split('''.''' ) ) == 1:
                submodules.append(SCREAMING_SNAKE_CASE_ )
    return submodules


__UpperCAmelCase : Optional[int] = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def a ( ):
    """simple docstring"""
    from transformers.utils import direct_transformers_import

    UpperCamelCase : Tuple = direct_transformers_import(SCREAMING_SNAKE_CASE_ )
    UpperCamelCase : str = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(SCREAMING_SNAKE_CASE_ , '''__init__.py''' ) , '''r''' ) as f:
        UpperCamelCase : List[Any] = f.read()
    import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , SCREAMING_SNAKE_CASE_ ) ) )
    UpperCamelCase : Union[str, Any] = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(SCREAMING_SNAKE_CASE_ ) > 0:
        UpperCamelCase : str = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
        raise ValueError(
            '''The following submodules are not properly registered in the main init of Transformers:\n'''
            F"""{list_of_modules}\n"""
            '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
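# For reference, a minimal sketch of the lazy-init layout this checker
# validates; FooConfig / FooModel are hypothetical placeholders for whatever
# the real init guards. The _import_structure dict and the TYPE_CHECKING
# branch must declare the same objects per backend, otherwise analyze_results
# reports the difference.
# _import_structure = {"configuration_foo": ["FooConfig"]}
# try:
#     if not is_torch_available():
#         raise OptionalDependencyNotAvailable()
# except OptionalDependencyNotAvailable:
#     pass
# else:
#     _import_structure["modeling_foo"] = ["FooModel"]
# if TYPE_CHECKING:
#     from .configuration_foo import FooConfig
#     try:
#         if not is_torch_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         pass
#     else:
#         from .modeling_foo import FooModel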
315
1
def a ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , ):
    """simple docstring"""
    UpperCamelCase : Tuple = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError('''All input parameters must be non-negative''' )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError('''Relative densities cannot be greater than one''' )
    else:
        UpperCamelCase : Any = 1 - (matter_density + radiation_density + dark_energy)
        UpperCamelCase : Union[str, Any] = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        UpperCamelCase : Optional[Any] = hubble_constant * e_a ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    __UpperCAmelCase : Union[str, Any] = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1E-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
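# Worked check of the demo above: at redshift 0 the bracket evaluates to
# Or + Om + Ok + OL, and since curvature Ok is defined as 1 - (Om + Or + OL)
# those four terms always sum to exactly 1, so the demo prints H0 itself
# (68.3). At higher redshift the matter term dominates; for example
# (illustrative numbers only):
# hubble_parameter(hubble_constant=68.3, radiation_density=1e-4,
#                  matter_density=0.3, dark_energy=0.7, redshift=1)
# -> 68.3 * (1e-4 * 16 + 0.3 * 8 - 1e-4 * 4 + 0.7) ** 0.5  ~ 120.3 km/s/Mpc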
315
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" UpperCamelCase : Any = set() # Replace all the whitespace in our sentence UpperCamelCase : Union[str, Any] = input_str.replace(''' ''' , '''''' ) for alpha in input_str: if "a" <= alpha.lower() <= "z": frequency.add(alpha.lower() ) return len(SCREAMING_SNAKE_CASE_ ) == 2_6 def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" UpperCamelCase : str = [False] * 2_6 for char in input_str: if char.islower(): UpperCamelCase : List[Any] = True elif char.isupper(): UpperCamelCase : List[Any] = True return all(SCREAMING_SNAKE_CASE_ ) def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6 def a ( ): """simple docstring""" from timeit import timeit UpperCamelCase : int = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest''' print(timeit('''is_pangram()''' , setup=SCREAMING_SNAKE_CASE_ ) ) print(timeit('''is_pangram_faster()''' , setup=SCREAMING_SNAKE_CASE_ ) ) print(timeit('''is_pangram_fastest()''' , setup=SCREAMING_SNAKE_CASE_ ) ) # 5.348480500048026, 2.6477354579837993, 1.8470395830227062 # 5.036091582966037, 2.644472333951853, 1.8869528750656173 if __name__ == "__main__": import doctest doctest.testmod() benchmark()
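# Quick sanity checks for the three detectors above; their original names
# is_pangram / is_pangram_faster / is_pangram_fastest are confirmed by the
# benchmark setup string. All three agree on the default sentence and on a
# negative case:
# assert is_pangram() and is_pangram_faster() and is_pangram_fastest()
# assert not is_pangram_fastest("My name is Unknown")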
315
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase : str = logging.get_logger(__name__) __UpperCAmelCase : Optional[int] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : Tuple = "megatron-bert" def __init__( self , __SCREAMING_SNAKE_CASE=29_056 , __SCREAMING_SNAKE_CASE=1_024 , __SCREAMING_SNAKE_CASE=24 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=4_096 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE="absolute" , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = vocab_size UpperCamelCase : Union[str, Any] = hidden_size UpperCamelCase : Tuple = num_hidden_layers UpperCamelCase : Any = num_attention_heads UpperCamelCase : Tuple = hidden_act UpperCamelCase : Tuple = intermediate_size UpperCamelCase : List[Any] = hidden_dropout_prob UpperCamelCase : List[str] = attention_probs_dropout_prob UpperCamelCase : Optional[int] = max_position_embeddings UpperCamelCase : Any = type_vocab_size UpperCamelCase : List[Any] = initializer_range UpperCamelCase : Union[str, Any] = layer_norm_eps UpperCamelCase : int = position_embedding_type UpperCamelCase : List[Any] = use_cache
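# A hedged instantiation sketch, assuming the public transformers names
# MegatronBertConfig / MegatronBertModel for the class above; the defaults
# match the 345M Megatron-BERT geometry (24 layers, hidden size 1024,
# 16 attention heads), and the tiny variant is illustrative.
# from transformers import MegatronBertConfig, MegatronBertModel
# config = MegatronBertConfig()               # 345M-style defaults
# tiny = MegatronBertConfig(num_hidden_layers=2, hidden_size=128,
#                           num_attention_heads=4, intermediate_size=512)
# model = MegatronBertModel(tiny)             # randomly initialized weights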
315
import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) __UpperCAmelCase : Union[str, Any] = logging.getLogger() def a ( ): """simple docstring""" UpperCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument('''-f''' ) UpperCamelCase : List[str] = parser.parse_args() return args.f class UpperCAmelCase_ ( _a): '''simple docstring''' def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = logging.StreamHandler(sys.stdout ) logger.addHandler(__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Dict = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , '''run_glue_deebert.py''' ) with patch.object(__SCREAMING_SNAKE_CASE , '''argv''' , __SCREAMING_SNAKE_CASE ): UpperCamelCase : int = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(__SCREAMING_SNAKE_CASE , 0.666 ) @slow @require_torch_non_multi_gpu def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = ''' --model_type roberta --model_name_or_path roberta-base --task_name MRPC --do_train --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --max_seq_length 128 --per_gpu_eval_batch_size=1 --per_gpu_train_batch_size=8 --learning_rate 2e-4 --num_train_epochs 3 --overwrite_output_dir --seed 42 --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --save_steps 0 --overwrite_cache --eval_after_first_stage '''.split() self.run_and_check(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = ''' --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --eval_each_highway --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 '''.split() self.run_and_check(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = ''' --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --early_exit_entropy 0.1 --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 '''.split() self.run_and_check(__SCREAMING_SNAKE_CASE )
315
1
from collections import defaultdict def a ( SCREAMING_SNAKE_CASE_ : int ): """simple docstring""" UpperCamelCase : int = 1 UpperCamelCase : List[Any] = True for v in tree[start]: if v not in visited: ret += dfs(SCREAMING_SNAKE_CASE_ ) if ret % 2 == 0: cuts.append(SCREAMING_SNAKE_CASE_ ) return ret def a ( ): """simple docstring""" dfs(1 ) if __name__ == "__main__": __UpperCAmelCase , __UpperCAmelCase : Tuple = 10, 9 __UpperCAmelCase : str = defaultdict(list) __UpperCAmelCase : dict[int, bool] = {} __UpperCAmelCase : list[int] = [] __UpperCAmelCase : Union[str, Any] = 0 __UpperCAmelCase : int = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
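# The placeholder renaming above obscures the recursion, so here is a minimal
# de-obfuscated sketch of the same even-tree algorithm: a DFS returns each
# subtree's size, every vertex whose subtree is even-sized is recorded, and
# the answer excludes the root (whose "subtree" is the whole tree), hence the
# len(cuts) - 1 printed above (2 for the sample 10-node tree).
def dfs_subtree_sizes(node: int, adj: dict, seen: dict, even_cuts: list) -> int:
    seen[node] = True
    size = 1
    for child in adj[node]:
        if child not in seen:
            size += dfs_subtree_sizes(child, adj, seen, even_cuts)
    if size % 2 == 0:
        even_cuts.append(node)   # the edge above `node` is removable
    return size

# Usage against the sample globals above:
# seen, even_cuts = {}, []
# dfs_subtree_sizes(1, tree, seen, even_cuts)
# print(len(even_cuts) - 1)     # 2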
315
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase : Tuple = logging.get_logger(__name__) __UpperCAmelCase : Union[str, Any] = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : List[Any] = "ibert" def __init__( self , __SCREAMING_SNAKE_CASE=30_522 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE="absolute" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="none" , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = vocab_size UpperCamelCase : Optional[int] = hidden_size UpperCamelCase : Tuple = num_hidden_layers UpperCamelCase : Optional[Any] = num_attention_heads UpperCamelCase : Dict = hidden_act UpperCamelCase : Union[str, Any] = intermediate_size UpperCamelCase : str = hidden_dropout_prob UpperCamelCase : Any = attention_probs_dropout_prob UpperCamelCase : Dict = max_position_embeddings UpperCamelCase : Union[str, Any] = type_vocab_size UpperCamelCase : Optional[Any] = initializer_range UpperCamelCase : Union[str, Any] = layer_norm_eps UpperCamelCase : Dict = position_embedding_type UpperCamelCase : int = quant_mode UpperCamelCase : Any = force_dequant class UpperCAmelCase_ ( _a): '''simple docstring''' @property def _lowercase ( self ): """simple docstring""" if self.task == "multiple-choice": UpperCamelCase : int = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: UpperCamelCase : Optional[int] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
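# A hedged sketch, assuming the public transformers name IBertConfig for the
# class above: the two I-BERT-specific knobs are quant_mode (switch to
# simulated integer-only arithmetic) and force_dequant (keep selected ops in
# floating point even when quantized).
# from transformers import IBertConfig
# fp_config = IBertConfig()                   # quant_mode=False: FP inference
# int_config = IBertConfig(quant_mode=True)   # integer-only (INT8-style) path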
315
1
import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class UpperCAmelCase_ ( _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : Any = ProphetNetTokenizer __UpperCamelCase : List[Any] = False def _lowercase ( self ): """simple docstring""" super().setUp() UpperCamelCase : Dict = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] UpperCamelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Any = '''UNwant\u00E9d,running''' UpperCamelCase : str = '''unwanted, running''' return input_text, output_text def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[Any] = self.tokenizer_class(self.vocab_file ) UpperCamelCase : Optional[int] = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [9, 6, 7, 12, 10, 11] ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? 
''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : int = BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] UpperCamelCase : Optional[Any] = {} for i, token in enumerate(__SCREAMING_SNAKE_CASE ): UpperCamelCase : Union[str, Any] = i UpperCamelCase : Union[str, Any] = WordpieceTokenizer(vocab=__SCREAMING_SNAKE_CASE , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) @require_torch def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' ) UpperCamelCase : List[str] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] UpperCamelCase : List[Any] = [1_037, 2_146, 20_423, 2_005, 7_680, 7_849, 3_989, 1_012, 102] UpperCamelCase : List[Any] = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def _lowercase ( self ): """simple docstring""" self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def _lowercase ( self ): """simple docstring""" self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def 
_lowercase ( self ): """simple docstring""" self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[Any] = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' ) UpperCamelCase : Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_a + [102]
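# A minimal live sketch of the greedy longest-match-first WordPiece step that
# the tests above exercise (the real transformers WordpieceTokenizer adds a
# max-chars-per-word guard on top of this logic):
def demo_wordpiece(word: str, vocab: set, unk: str = "[UNK]") -> list:
    tokens, start = [], 0
    while start < len(word):
        end, piece = len(word), None
        while start < end:                       # shrink until a vocab hit
            candidate = ("##" if start > 0 else "") + word[start:end]
            if candidate in vocab:
                piece = candidate
                break
            end -= 1
        if piece is None:
            return [unk]                         # any miss -> whole word unknown
        tokens.append(piece)
        start = end
    return tokens

# demo_wordpiece("unwanted", {"un", "##want", "##ed"})   -> ['un', '##want', '##ed']
# demo_wordpiece("unwantedX", {"un", "##want", "##ed"})  -> ['[UNK]']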
315
import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup __UpperCAmelCase : int = logging.get_logger(__name__) class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , **__SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(self , ['''bs4'''] ) super().__init__(**__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : List[Any] = [] UpperCamelCase : int = [] UpperCamelCase : List[Any] = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag UpperCamelCase : Tuple = parent.find_all(child.name , recursive=__SCREAMING_SNAKE_CASE ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(__SCREAMING_SNAKE_CASE ) else next(i for i, s in enumerate(__SCREAMING_SNAKE_CASE , 1 ) if s is child ) ) UpperCamelCase : Optional[Any] = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[Any] = BeautifulSoup(__SCREAMING_SNAKE_CASE , '''html.parser''' ) UpperCamelCase : Union[str, Any] = [] UpperCamelCase : List[str] = [] UpperCamelCase : str = [] for element in html_code.descendants: if type(__SCREAMING_SNAKE_CASE ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue UpperCamelCase : Any = html.unescape(__SCREAMING_SNAKE_CASE ).strip() if not text_in_this_tag: continue all_doc_strings.append(__SCREAMING_SNAKE_CASE ) UpperCamelCase , UpperCamelCase : int = self.xpath_soup(__SCREAMING_SNAKE_CASE ) stringaxtag_seq.append(__SCREAMING_SNAKE_CASE ) stringaxsubs_seq.append(__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[Any] = '''''' for tagname, subs in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): xpath += f"""/{tagname}""" if subs != 0: xpath += f"""[{subs}]""" return xpath def __call__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : int = False # Check that strings has a valid type if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): UpperCamelCase : Optional[Any] = True elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ): if len(__SCREAMING_SNAKE_CASE ) == 0 or isinstance(html_strings[0] , __SCREAMING_SNAKE_CASE ): UpperCamelCase : List[str] = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' f"""but is of type {type(__SCREAMING_SNAKE_CASE )}.""" ) UpperCamelCase : int = bool(isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(html_strings[0] , __SCREAMING_SNAKE_CASE )) ) if not is_batched: UpperCamelCase : Union[str, Any] = [html_strings] # Get nodes + xpaths UpperCamelCase : str = [] UpperCamelCase : int = [] for html_string in html_strings: UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = self.get_three_from_single(__SCREAMING_SNAKE_CASE ) 
nodes.append(__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = [] for node, tag_list, sub_list in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): UpperCamelCase : str = self.construct_xpath(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) xpath_strings.append(__SCREAMING_SNAKE_CASE ) xpaths.append(__SCREAMING_SNAKE_CASE ) # return as Dict UpperCamelCase : List[str] = {'''nodes''': nodes, '''xpaths''': xpaths} UpperCamelCase : List[Any] = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE ) return encoded_inputs
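# A hedged usage sketch, assuming the public transformers name
# MarkupLMFeatureExtractor for the class above: it walks the BeautifulSoup
# parse tree and returns every text node together with its absolute XPath,
# subscripting tags that have same-named siblings (traced from the code above):
# from transformers import MarkupLMFeatureExtractor
# fe = MarkupLMFeatureExtractor()
# out = fe("<html><body><p>Hello</p><p>World</p></body></html>")
# out["nodes"]    # [['Hello', 'World']]
# out["xpaths"]   # [['/html/body/p[1]', '/html/body/p[2]']]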
315
1
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" UpperCamelCase : Any = set() # Replace all the whitespace in our sentence UpperCamelCase : Union[str, Any] = input_str.replace(''' ''' , '''''' ) for alpha in input_str: if "a" <= alpha.lower() <= "z": frequency.add(alpha.lower() ) return len(SCREAMING_SNAKE_CASE_ ) == 2_6 def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" UpperCamelCase : str = [False] * 2_6 for char in input_str: if char.islower(): UpperCamelCase : List[Any] = True elif char.isupper(): UpperCamelCase : List[Any] = True return all(SCREAMING_SNAKE_CASE_ ) def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6 def a ( ): """simple docstring""" from timeit import timeit UpperCamelCase : int = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest''' print(timeit('''is_pangram()''' , setup=SCREAMING_SNAKE_CASE_ ) ) print(timeit('''is_pangram_faster()''' , setup=SCREAMING_SNAKE_CASE_ ) ) print(timeit('''is_pangram_fastest()''' , setup=SCREAMING_SNAKE_CASE_ ) ) # 5.348480500048026, 2.6477354579837993, 1.8470395830227062 # 5.036091582966037, 2.644472333951853, 1.8869528750656173 if __name__ == "__main__": import doctest doctest.testmod() benchmark()
315
import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params __UpperCAmelCase : List[str] = getLogger(__name__) __UpperCAmelCase : Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu" def a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int = 8 , SCREAMING_SNAKE_CASE_ : str = DEFAULT_DEVICE , SCREAMING_SNAKE_CASE_ : List[Any]=False , SCREAMING_SNAKE_CASE_ : int="summarization" , SCREAMING_SNAKE_CASE_ : int=None , **SCREAMING_SNAKE_CASE_ : Any , ): """simple docstring""" UpperCamelCase : Dict = Path(SCREAMING_SNAKE_CASE_ ).open('''w''' , encoding='''utf-8''' ) UpperCamelCase : List[Any] = str(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) if fpaa: UpperCamelCase : List[Any] = model.half() UpperCamelCase : str = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type. UpperCamelCase : int = time.time() # update config with task specific params use_task_specific_params(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if prefix is None: UpperCamelCase : Union[str, Any] = prefix or getattr(model.config , '''prefix''' , '''''' ) or '''''' for examples_chunk in tqdm(list(chunks(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ): UpperCamelCase : Optional[int] = [prefix + text for text in examples_chunk] UpperCamelCase : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , truncation=SCREAMING_SNAKE_CASE_ , padding='''longest''' ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **SCREAMING_SNAKE_CASE_ , ) UpperCamelCase : List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) for hypothesis in dec: fout.write(hypothesis + '''\n''' ) fout.flush() fout.close() UpperCamelCase : str = int(time.time() - start_time ) # seconds UpperCamelCase : Any = len(SCREAMING_SNAKE_CASE_ ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def a ( ): """simple docstring""" return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' ) def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any]=True ): """simple docstring""" UpperCamelCase : int = argparse.ArgumentParser() parser.add_argument('''model_name''' , type=SCREAMING_SNAKE_CASE_ , help='''like facebook/bart-large-cnn,t5-base, etc.''' ) parser.add_argument('''input_path''' , type=SCREAMING_SNAKE_CASE_ , help='''like cnn_dm/test.source''' ) parser.add_argument('''save_path''' , type=SCREAMING_SNAKE_CASE_ , help='''where to save summaries''' ) parser.add_argument('''--reference_path''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='''like cnn_dm/test.target''' ) parser.add_argument('''--score_path''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , default='''metrics.json''' , help='''where to save metrics''' ) parser.add_argument('''--device''' , type=SCREAMING_SNAKE_CASE_ , 
required=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help='''cuda, cuda:1, cpu etc.''' )
    parser.add_argument(
        '''--prefix''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help='''will be added to the beginning of src examples''' )
    parser.add_argument('''--task''' , type=SCREAMING_SNAKE_CASE_ , default='''summarization''' , help='''used for task_specific_params + metrics''' )
    parser.add_argument('''--bs''' , type=SCREAMING_SNAKE_CASE_ , default=8 , required=SCREAMING_SNAKE_CASE_ , help='''batch size''' )
    parser.add_argument(
        '''--n_obs''' , type=SCREAMING_SNAKE_CASE_ , default=-1 , required=SCREAMING_SNAKE_CASE_ , help='''How many observations. Defaults to all.''' )
    parser.add_argument('''--fp16''' , action='''store_true''' )
    parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
    parser.add_argument(
        '''--info''' , nargs='''?''' , type=SCREAMING_SNAKE_CASE_ , const=datetime_now() , help=(
            '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
            ''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
        ) , )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    UpperCamelCase , UpperCamelCase : int = parser.parse_known_args()
    UpperCamelCase : str = parse_numeric_n_bool_cl_kwargs(SCREAMING_SNAKE_CASE_ )
    if parsed_args and verbose:
        print(F"""parsed the following generate kwargs: {parsed_args}""" )
    UpperCamelCase : str = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        UpperCamelCase : Tuple = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
    if args.reference_path is None and Path(args.score_path ).exists():
        warnings.warn(F"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
    if args.device == "cpu" and args.fpaa:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
    UpperCamelCase : str = generate_summaries_or_translations(
        SCREAMING_SNAKE_CASE_ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **SCREAMING_SNAKE_CASE_ , )
    if args.reference_path is None:
        return {}

    # Compute scores
    UpperCamelCase : Tuple = calculate_bleu if '''translation''' in args.task else calculate_rouge
    UpperCamelCase : Dict = [x.rstrip() for x in open(args.save_path ).readlines()]
    UpperCamelCase : Optional[int] = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(SCREAMING_SNAKE_CASE_ )]
    UpperCamelCase : dict = score_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    scores.update(SCREAMING_SNAKE_CASE_ )
    if args.dump_args:
        scores.update(SCREAMING_SNAKE_CASE_ )
    if args.info:
        UpperCamelCase : Optional[Any] = args.info
    if verbose:
        print(SCREAMING_SNAKE_CASE_ )
    if args.score_path is not None:
        json.dump(SCREAMING_SNAKE_CASE_ , open(args.score_path , '''w''' ) )
    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
315
1
from arguments import InitializationArguments from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser # Configuration __UpperCAmelCase : Dict = HfArgumentParser(InitializationArguments) __UpperCAmelCase : List[str] = parser.parse_args() # Load codeparrot tokenizer trained for Python code tokenization __UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained(args.tokenizer_name) # Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks __UpperCAmelCase : Union[str, Any] = { "vocab_size": len(tokenizer), "scale_attn_by_inverse_layer_idx": True, "reorder_and_upcast_attn": True, } # Load model config (GPT-2 large in this case) __UpperCAmelCase : Optional[int] = AutoConfig.from_pretrained(args.config_name, **config_kwargs) # Initialize new model with config __UpperCAmelCase : str = AutoModelForCausalLM.from_config(config) # Save model to the hub model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
315
import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : int = ["image_processor", "tokenizer"] __UpperCamelCase : List[str] = "AutoImageProcessor" __UpperCamelCase : Optional[Any] = "AutoTokenizer" def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[int] = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __SCREAMING_SNAKE_CASE , ) UpperCamelCase : Any = kwargs.pop('''feature_extractor''' ) UpperCamelCase : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = self.image_processor UpperCamelCase : int = False def __call__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = kwargs.pop('''images''' , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = kwargs.pop('''text''' , __SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) > 0: UpperCamelCase : Union[str, Any] = args[0] UpperCamelCase : str = args[1:] if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: UpperCamelCase : List[str] = self.image_processor(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if text is not None: UpperCamelCase : Optional[Any] = self.tokenizer(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if text is None: return inputs elif images is None: return encodings else: UpperCamelCase : List[str] = encodings['''input_ids'''] return inputs def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): """simple docstring""" return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): """simple docstring""" return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) @contextmanager def _lowercase ( self ): """simple docstring""" warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your images inputs, or in a separate call.''' ) UpperCamelCase : Any = True UpperCamelCase : int = self.tokenizer yield UpperCamelCase : List[Any] = self.image_processor UpperCamelCase : Tuple = False def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None ): """simple docstring""" if added_vocab is None: UpperCamelCase : str = self.tokenizer.get_added_vocab() UpperCamelCase : int = {} while tokens: UpperCamelCase : Dict = re.search(R'''<s_(.*?)>''' , __SCREAMING_SNAKE_CASE , re.IGNORECASE ) if start_token is None: break UpperCamelCase : List[str] = start_token.group(1 ) UpperCamelCase : Dict = re.search(Rf"""</s_{key}>""" , __SCREAMING_SNAKE_CASE , re.IGNORECASE ) UpperCamelCase : Any = start_token.group() if end_token is None: UpperCamelCase : Optional[int] = tokens.replace(__SCREAMING_SNAKE_CASE , '''''' ) else: UpperCamelCase : Dict = end_token.group() UpperCamelCase : int = re.escape(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = re.escape(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , __SCREAMING_SNAKE_CASE , re.IGNORECASE ) if content is not None: UpperCamelCase : Dict = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node UpperCamelCase : Tuple = self.tokenajson(__SCREAMING_SNAKE_CASE , is_inner_value=__SCREAMING_SNAKE_CASE , added_vocab=__SCREAMING_SNAKE_CASE ) if value: if len(__SCREAMING_SNAKE_CASE ) == 1: UpperCamelCase : str = value[0] UpperCamelCase : str = value else: # leaf nodes UpperCamelCase : Optional[int] = [] for leaf in content.split(R'''<sep/>''' ): UpperCamelCase : Optional[int] = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": UpperCamelCase : int = leaf[1:-2] # for categorical special tokens output[key].append(__SCREAMING_SNAKE_CASE ) if len(output[key] ) == 1: UpperCamelCase : Tuple = output[key][0] UpperCamelCase : List[Any] = tokens[tokens.find(__SCREAMING_SNAKE_CASE ) + len(__SCREAMING_SNAKE_CASE ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=__SCREAMING_SNAKE_CASE , added_vocab=__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def _lowercase ( self ): """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __SCREAMING_SNAKE_CASE , ) return self.image_processor_class @property def _lowercase ( self ): """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __SCREAMING_SNAKE_CASE , ) return self.image_processor
315
1
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class UpperCAmelCase_ : '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=33 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , ): """simple docstring""" UpperCamelCase : Optional[int] = parent UpperCamelCase : Any = batch_size UpperCamelCase : Dict = seq_length UpperCamelCase : Tuple = is_training UpperCamelCase : List[Any] = use_input_mask UpperCamelCase : Union[str, Any] = use_token_type_ids UpperCamelCase : str = use_labels UpperCamelCase : Dict = vocab_size UpperCamelCase : Any = hidden_size UpperCamelCase : Optional[int] = num_hidden_layers UpperCamelCase : List[Any] = num_attention_heads UpperCamelCase : List[str] = intermediate_size UpperCamelCase : Any = hidden_act UpperCamelCase : Tuple = hidden_dropout_prob UpperCamelCase : List[Any] = attention_probs_dropout_prob UpperCamelCase : List[str] = max_position_embeddings UpperCamelCase : Optional[Any] = type_vocab_size UpperCamelCase : str = type_sequence_label_size UpperCamelCase : Union[str, Any] = initializer_range UpperCamelCase : Optional[int] = num_labels UpperCamelCase : Optional[int] = num_choices UpperCamelCase : Optional[int] = scope def _lowercase ( self ): """simple docstring""" UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase : int = None if self.use_input_mask: UpperCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase : Tuple = None UpperCamelCase : Tuple = None UpperCamelCase : int = None if self.use_labels: UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase : Union[str, Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase ( self ): """simple docstring""" return EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Any = EsmModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = model(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : str = EsmForMaskedLM(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : Tuple = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : str = self.num_labels UpperCamelCase : Dict = EsmForTokenClassification(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : Optional[Any] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : Optional[Any] = config_and_inputs UpperCamelCase : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a, _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : Dict = False __UpperCamelCase : Union[str, Any] = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) __UpperCamelCase : int = () __UpperCamelCase : List[str] = ( { "feature-extraction": EsmModel, "fill-mask": EsmForMaskedLM, "text-classification": EsmForSequenceClassification, "token-classification": EsmForTokenClassification, "zero-shot": EsmForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase : Dict = True def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = EsmModelTester(self ) UpperCamelCase : str = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def _lowercase ( self ): """simple docstring""" self.config_tester.run_common_tests() def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" 
UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase : List[Any] = type self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE ) @slow def _lowercase ( self ): """simple docstring""" for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : int = EsmModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()[0] UpperCamelCase : Optional[int] = EsmEmbeddings(config=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) UpperCamelCase : Any = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) UpperCamelCase : Optional[int] = create_position_ids_from_input_ids(__SCREAMING_SNAKE_CASE , model.padding_idx ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()[0] UpperCamelCase : Tuple = EsmEmbeddings(config=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = torch.empty(2 , 4 , 30 ) UpperCamelCase : Any = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] UpperCamelCase : Union[str, Any] = torch.as_tensor([expected_single_positions, expected_single_positions] ) UpperCamelCase : List[Any] = embeddings.create_position_ids_from_inputs_embeds(__SCREAMING_SNAKE_CASE ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) ) @unittest.skip('''Esm does not support embedding resizing''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip('''Esm does not support embedding resizing''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def _lowercase ( self ): """simple docstring""" pass @require_torch class UpperCAmelCase_ ( _a): '''simple docstring''' @slow def _lowercase ( self ): """simple docstring""" with torch.no_grad(): UpperCamelCase : Dict = EsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' ) model.eval() UpperCamelCase : Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]] ) UpperCamelCase : List[Any] = model(__SCREAMING_SNAKE_CASE )[0] UpperCamelCase : List[str] = 33 UpperCamelCase : Dict = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = torch.tensor( [[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow def _lowercase ( self ): """simple docstring""" with 
torch.no_grad(): UpperCamelCase : Optional[Any] = EsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' ) model.eval() UpperCamelCase : List[str] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) UpperCamelCase : List[str] = model(__SCREAMING_SNAKE_CASE )[0] # compare the actual values for a slice. UpperCamelCase : Optional[int] = torch.tensor( [[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
315
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase : Union[str, Any] = { "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"], "processing_mgp_str": ["MgpstrProcessor"], "tokenization_mgp_str": ["MgpstrTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : Union[str, Any] = [ "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST", "MgpstrModel", "MgpstrPreTrainedModel", "MgpstrForSceneTextRecognition", ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys __UpperCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
315
1
import pytest
import requests

from datasets.utils.file_utils import OfflineModeIsEnabled, http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(OfflineModeIsEnabled):
            http_head("https://huggingface.co")
315
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
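# Sanity check, assuming this is the Project Euler 114 counting problem (blocks of
# length >= 3 that must be separated by at least one empty cell): the puzzle
# statement gives exactly 17 arrangements for a row of length 7.
assert solution(7) == 17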
315
1
import math


def prime_sieve(n: int) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
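# A quick spot-check of the sieve helper above (an added example, not part of the
# original solution): the primes strictly below 30.
assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]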
315
import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , *__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ): """simple docstring""" super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = eval_examples UpperCamelCase : Optional[Any] = post_process_function def _lowercase ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "eval" ): """simple docstring""" UpperCamelCase : int = self.eval_dataset if eval_dataset is None else eval_dataset UpperCamelCase : int = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. UpperCamelCase : Any = self.compute_metrics UpperCamelCase : List[Any] = None UpperCamelCase : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop UpperCamelCase : Dict = time.time() try: UpperCamelCase : str = eval_loop( __SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , ) finally: UpperCamelCase : Union[str, Any] = compute_metrics UpperCamelCase : Any = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default UpperCamelCase : Dict = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions ) UpperCamelCase : Optional[Any] = self.compute_metrics(__SCREAMING_SNAKE_CASE ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): UpperCamelCase : Dict = metrics.pop(__SCREAMING_SNAKE_CASE ) metrics.update(output.metrics ) else: UpperCamelCase : List[Any] = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(__SCREAMING_SNAKE_CASE ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) UpperCamelCase : Any = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE ) return metrics def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "test" ): """simple docstring""" UpperCamelCase : Tuple = self.get_test_dataloader(__SCREAMING_SNAKE_CASE ) # Temporarily disable metric computation, we will do it in the loop here. 
UpperCamelCase : Union[str, Any] = self.compute_metrics UpperCamelCase : Tuple = None UpperCamelCase : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop UpperCamelCase : Optional[int] = time.time() try: UpperCamelCase : int = eval_loop( __SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , ) finally: UpperCamelCase : int = compute_metrics UpperCamelCase : Dict = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output UpperCamelCase : Dict = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions , '''predict''' ) UpperCamelCase : Union[str, Any] = self.compute_metrics(__SCREAMING_SNAKE_CASE ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): UpperCamelCase : Any = metrics.pop(__SCREAMING_SNAKE_CASE ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE )
315
1
import argparse
import re

from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints

from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging

logging.set_verbosity_info()

# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"

        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weihts = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weihts[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")

            s_dict.pop(key)

    return s_dict


GIN_TO_CONFIG_MAPPING = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}


def convert_gin_to_config(gin_file, num_experts):
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config


def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
315
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}

    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
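# A small illustrative check of the signature idea (an added example, not part of the
# original script): two words are anagrams exactly when their sorted-letter
# signatures coincide.
assert signature("retainers") == signature("ternaries") == "aeeinrrst"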
315
1
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
315
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)

    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proces(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)

    return source_data
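# A hedged usage sketch for the pipeline above (toy data, not from the original file).
# With weights [0, 1] the first column is scored as "lower is better" and the second
# as "higher is better"; each row gets its combined score appended in place:
if __name__ == "__main__":
    vehicles = [[20.0, 60.0], [23.0, 90.0], [22.0, 50.0]]
    print(procentual_proces(vehicles, [0, 1]))
    # -> [[20.0, 60.0, 1.25], [23.0, 90.0, 1.0], [22.0, 50.0, 0.3333...]]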
315
1
import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : int = 0 __UpperCamelCase : bool = False __UpperCamelCase : float = 3.0 class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' def _lowercase ( self ): """simple docstring""" self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} ) self.assertDictEqual(MockClass(a=2 , b=__SCREAMING_SNAKE_CASE ).to_kwargs() , {'''a''': 2, '''b''': True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} ) @require_cuda def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = GradScalerKwargs(init_scale=1_024 , growth_factor=2 ) AcceleratorState._reset_state() UpperCamelCase : str = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) UpperCamelCase : Optional[Any] = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1_024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2_000 ) self.assertEqual(scaler._enabled , __SCREAMING_SNAKE_CASE ) @require_multi_gpu def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=os.environ.copy() ) if __name__ == "__main__": __UpperCAmelCase : Any = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) __UpperCAmelCase : Any = Accelerator(kwargs_handlers=[ddp_scaler]) __UpperCAmelCase : str = torch.nn.Linear(100, 200) __UpperCAmelCase : Any = accelerator.prepare(model) # Check the values changed in kwargs __UpperCAmelCase : Optional[Any] = "" __UpperCAmelCase : Dict = model.bucket_bytes_cap // (1024 * 1024) if observed_bucket_cap_map != 15: error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
315
import glob import os import random from string import ascii_lowercase, digits import cva __UpperCAmelCase : Optional[int] = "" __UpperCAmelCase : Union[str, Any] = "" __UpperCAmelCase : Optional[int] = "" __UpperCAmelCase : Any = 1 # (0 is vertical, 1 is horizontal) def a ( ): """simple docstring""" UpperCamelCase , UpperCamelCase : List[Any] = get_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) print('''Processing...''' ) UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = update_image_and_anno(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for index, image in enumerate(SCREAMING_SNAKE_CASE_ ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' UpperCamelCase : Optional[int] = random_chars(3_2 ) UpperCamelCase : List[Any] = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0] UpperCamelCase : int = F"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}""" cva.imwrite(F"""/{file_root}.jpg""" , SCREAMING_SNAKE_CASE_ , [cva.IMWRITE_JPEG_QUALITY, 8_5] ) print(F"""Success {index+1}/{len(SCREAMING_SNAKE_CASE_ )} with {file_name}""" ) UpperCamelCase : Any = [] for anno in new_annos[index]: UpperCamelCase : Tuple = F"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}""" annos_list.append(SCREAMING_SNAKE_CASE_ ) with open(F"""/{file_root}.txt""" , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ): """simple docstring""" UpperCamelCase : Any = [] UpperCamelCase : Union[str, Any] = [] for label_file in glob.glob(os.path.join(SCREAMING_SNAKE_CASE_ , '''*.txt''' ) ): UpperCamelCase : Optional[Any] = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(SCREAMING_SNAKE_CASE_ ) as in_file: UpperCamelCase : List[str] = in_file.readlines() UpperCamelCase : Tuple = os.path.join(SCREAMING_SNAKE_CASE_ , F"""{label_name}.jpg""" ) UpperCamelCase : Union[str, Any] = [] for obj_list in obj_lists: UpperCamelCase : str = obj_list.rstrip('''\n''' ).split(''' ''' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(SCREAMING_SNAKE_CASE_ ) labels.append(SCREAMING_SNAKE_CASE_ ) return img_paths, labels def a ( SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : int = 1 ): """simple docstring""" UpperCamelCase : List[Any] = [] UpperCamelCase : str = [] UpperCamelCase : int = [] for idx in range(len(SCREAMING_SNAKE_CASE_ ) ): UpperCamelCase : Tuple = [] UpperCamelCase : Optional[int] = img_list[idx] path_list.append(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = anno_list[idx] UpperCamelCase : Optional[Any] = cva.imread(SCREAMING_SNAKE_CASE_ ) if flip_type == 1: UpperCamelCase : Optional[Any] = cva.flip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for bbox in img_annos: UpperCamelCase : Optional[Any] = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: UpperCamelCase : List[str] = cva.flip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for bbox in img_annos: UpperCamelCase : Union[str, Any] = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(SCREAMING_SNAKE_CASE_ ) new_imgs_list.append(SCREAMING_SNAKE_CASE_ ) return new_imgs_list, new_annos_lists, path_list def a ( SCREAMING_SNAKE_CASE_ : int = 3_2 ): """simple docstring""" assert number_char > 1, "The number of character should greater than 1" UpperCamelCase : Any = 
ascii_lowercase + digits return "".join(random.choice(SCREAMING_SNAKE_CASE_ ) for _ in range(SCREAMING_SNAKE_CASE_ ) ) if __name__ == "__main__": main() print("DONE ✅")
315
1
import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , *__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ): """simple docstring""" super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = eval_examples UpperCamelCase : Optional[Any] = post_process_function def _lowercase ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "eval" ): """simple docstring""" UpperCamelCase : int = self.eval_dataset if eval_dataset is None else eval_dataset UpperCamelCase : int = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. UpperCamelCase : Any = self.compute_metrics UpperCamelCase : List[Any] = None UpperCamelCase : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop UpperCamelCase : Dict = time.time() try: UpperCamelCase : str = eval_loop( __SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , ) finally: UpperCamelCase : Union[str, Any] = compute_metrics UpperCamelCase : Any = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default UpperCamelCase : Dict = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions ) UpperCamelCase : Optional[Any] = self.compute_metrics(__SCREAMING_SNAKE_CASE ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): UpperCamelCase : Dict = metrics.pop(__SCREAMING_SNAKE_CASE ) metrics.update(output.metrics ) else: UpperCamelCase : List[Any] = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(__SCREAMING_SNAKE_CASE ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) UpperCamelCase : Any = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE ) return metrics def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "test" ): """simple docstring""" UpperCamelCase : Tuple = self.get_test_dataloader(__SCREAMING_SNAKE_CASE ) # Temporarily disable metric computation, we will do it in the loop here. 
UpperCamelCase : Union[str, Any] = self.compute_metrics UpperCamelCase : Tuple = None UpperCamelCase : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop UpperCamelCase : Optional[int] = time.time() try: UpperCamelCase : int = eval_loop( __SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , ) finally: UpperCamelCase : int = compute_metrics UpperCamelCase : Dict = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output UpperCamelCase : Dict = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions , '''predict''' ) UpperCamelCase : Union[str, Any] = self.compute_metrics(__SCREAMING_SNAKE_CASE ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): UpperCamelCase : Any = metrics.pop(__SCREAMING_SNAKE_CASE ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE )
315
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
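# Expected-output note (an added example, assuming the default noiseless Aer backend):
# with inputs (1, 1) the sum bit (XOR) is 0 and the carry bit (AND) is 1, so every one
# of the 1000 shots should land in the '10' bucket, where classical bit 1 is the carry
# and classical bit 0 is the sum:
# counts == {'10': 1000}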
315
1
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def a ( SCREAMING_SNAKE_CASE_ : int=None ): """simple docstring""" if subparsers is not None: UpperCamelCase : int = subparsers.add_parser('''env''' ) else: UpperCamelCase : List[str] = argparse.ArgumentParser('''Accelerate env command''' ) parser.add_argument( '''--config_file''' , default=SCREAMING_SNAKE_CASE_ , help='''The config file to use for the default values in the launching script.''' ) if subparsers is not None: parser.set_defaults(func=SCREAMING_SNAKE_CASE_ ) return parser def a ( SCREAMING_SNAKE_CASE_ : Tuple ): """simple docstring""" UpperCamelCase : Tuple = torch.__version__ UpperCamelCase : str = torch.cuda.is_available() UpperCamelCase : Dict = is_xpu_available() UpperCamelCase : int = is_npu_available() UpperCamelCase : List[Any] = '''Not found''' # Get the default from the config file. if args.config_file is not None or os.path.isfile(SCREAMING_SNAKE_CASE_ ): UpperCamelCase : List[str] = load_config_from_file(args.config_file ).to_dict() UpperCamelCase : Union[str, Any] = { '''`Accelerate` version''': version, '''Platform''': platform.platform(), '''Python version''': platform.python_version(), '''Numpy version''': np.__version__, '''PyTorch version (GPU?)''': F"""{pt_version} ({pt_cuda_available})""", '''PyTorch XPU available''': str(SCREAMING_SNAKE_CASE_ ), '''PyTorch NPU available''': str(SCREAMING_SNAKE_CASE_ ), '''System RAM''': F"""{psutil.virtual_memory().total / 1_0_2_4 ** 3:.2f} GB""", } if pt_cuda_available: UpperCamelCase : str = torch.cuda.get_device_name() print('''\nCopy-and-paste the text below in your GitHub issue\n''' ) print('''\n'''.join([F"""- {prop}: {val}""" for prop, val in info.items()] ) ) print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' ) UpperCamelCase : List[Any] = ( '''\n'''.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else F"""\t{accelerate_config}""" ) print(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Tuple = accelerate_config return info def a ( ): """simple docstring""" UpperCamelCase : Optional[int] = env_command_parser() UpperCamelCase : Optional[int] = parser.parse_args() env_command(SCREAMING_SNAKE_CASE_ ) return 0 if __name__ == "__main__": raise SystemExit(main())
315
import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging __UpperCAmelCase : str = logging.get_logger(__name__) def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] ): """simple docstring""" UpperCamelCase : Union[str, Any] = nn.functional.normalize(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = nn.functional.normalize(SCREAMING_SNAKE_CASE_ ) return torch.mm(SCREAMING_SNAKE_CASE_ , normalized_text_embeds.t() ) class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : List[str] = CLIPConfig __UpperCamelCase : Optional[int] = ["CLIPEncoderLayer"] def __init__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" super().__init__(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = CLIPVisionModel(config.vision_config ) UpperCamelCase : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = nn.Parameter(torch.ones(17 ) , requires_grad=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = nn.Parameter(torch.ones(3 ) , requires_grad=__SCREAMING_SNAKE_CASE ) @torch.no_grad() def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Tuple = self.vision_model(__SCREAMING_SNAKE_CASE )[1] # pooled_output UpperCamelCase : Union[str, Any] = self.visual_projection(__SCREAMING_SNAKE_CASE ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCamelCase : Optional[int] = cosine_distance(__SCREAMING_SNAKE_CASE , self.special_care_embeds ).cpu().float().numpy() UpperCamelCase : List[Any] = cosine_distance(__SCREAMING_SNAKE_CASE , self.concept_embeds ).cpu().float().numpy() UpperCamelCase : Dict = [] UpperCamelCase : List[str] = image_embeds.shape[0] for i in range(__SCREAMING_SNAKE_CASE ): UpperCamelCase : Optional[Any] = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images UpperCamelCase : Optional[int] = 0.0 for concept_idx in range(len(special_cos_dist[0] ) ): UpperCamelCase : List[str] = special_cos_dist[i][concept_idx] UpperCamelCase : Optional[Any] = self.special_care_embeds_weights[concept_idx].item() UpperCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} ) UpperCamelCase : Optional[int] = 0.01 for concept_idx in range(len(cos_dist[0] ) ): UpperCamelCase : Optional[int] = cos_dist[i][concept_idx] UpperCamelCase : List[str] = self.concept_embeds_weights[concept_idx].item() UpperCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(__SCREAMING_SNAKE_CASE ) result.append(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = [len(res['''bad_concepts'''] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): 
"""simple docstring""" UpperCamelCase : Any = self.vision_model(__SCREAMING_SNAKE_CASE )[1] # pooled_output UpperCamelCase : int = self.visual_projection(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = cosine_distance(__SCREAMING_SNAKE_CASE , self.special_care_embeds ) UpperCamelCase : str = cosine_distance(__SCREAMING_SNAKE_CASE , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images UpperCamelCase : Union[str, Any] = 0.0 UpperCamelCase : Optional[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) UpperCamelCase : Optional[Any] = torch.any(special_scores > 0 , dim=1 ) UpperCamelCase : int = special_care * 0.01 UpperCamelCase : Tuple = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) UpperCamelCase : Optional[int] = (cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) UpperCamelCase : List[str] = torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
315
1
import collections import os import re from pathlib import Path __UpperCAmelCase : List[str] = "src/transformers" # Matches is_xxx_available() __UpperCAmelCase : int = re.compile(r"is\_([a-z_]*)_available()") # Catches a one-line _import_struct = {xxx} __UpperCAmelCase : Optional[int] = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __UpperCAmelCase : List[Any] = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]") # Catches a line if not is_foo_available __UpperCAmelCase : List[Any] = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)") # Catches a line _import_struct["bla"].append("foo") __UpperCAmelCase : str = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __UpperCAmelCase : Union[str, Any] = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]") # Catches a line with an object between quotes and a comma: "MyModel", __UpperCAmelCase : Dict = re.compile(r"^\s+\"([^\"]+)\",") # Catches a line with objects between brackets only: ["foo", "bar"], __UpperCAmelCase : str = re.compile(r"^\s+\[([^\]]+)\]") # Catches a line with from foo import bar, bla, boo __UpperCAmelCase : str = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Catches a line with try: __UpperCAmelCase : Any = re.compile(r"^\s*try:") # Catches a line with else: __UpperCAmelCase : List[Any] = re.compile(r"^\s*else:") def a ( SCREAMING_SNAKE_CASE_ : Dict ): """simple docstring""" if _re_test_backend.search(SCREAMING_SNAKE_CASE_ ) is None: return None UpperCamelCase : Union[str, Any] = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE_ )] backends.sort() return "_and_".join(SCREAMING_SNAKE_CASE_ ) def a ( SCREAMING_SNAKE_CASE_ : Tuple ): """simple docstring""" with open(SCREAMING_SNAKE_CASE_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: UpperCamelCase : Tuple = f.readlines() UpperCamelCase : Tuple = 0 while line_index < len(SCREAMING_SNAKE_CASE_ ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(SCREAMING_SNAKE_CASE_ ): return None # First grab the objects without a specific backend in _import_structure UpperCamelCase : List[Any] = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: UpperCamelCase : Optional[int] = lines[line_index] # If we have everything on a single line, let's deal with it. 
if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Optional[int] = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ).groups()[0] UpperCamelCase : str = re.findall(R'''\[([^\]]+)\]''' , SCREAMING_SNAKE_CASE_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue UpperCamelCase : List[Any] = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE_ ) if single_line_import_search is not None: UpperCamelCase : List[str] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(SCREAMING_SNAKE_CASE_ ) > 0] objects.extend(SCREAMING_SNAKE_CASE_ ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 UpperCamelCase : Dict = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. UpperCamelCase : Dict = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: UpperCamelCase : Optional[Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 UpperCamelCase : str = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): UpperCamelCase : str = lines[line_index] if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ) is not None: objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ).groups()[0] ) elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ) is not None: UpperCamelCase : Union[str, Any] = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(''', ''' ) UpperCamelCase : List[Any] = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0] objects.extend(SCREAMING_SNAKE_CASE_ ) elif _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ) is not None: UpperCamelCase : str = _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(''', ''' ) UpperCamelCase : Dict = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0] objects.extend(SCREAMING_SNAKE_CASE_ ) elif _re_quote_object.search(SCREAMING_SNAKE_CASE_ ) is not None: objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE_ ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 1_2 + '''"''' ): objects.append(line[1_3:-3] ) line_index += 1 UpperCamelCase : Tuple = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend UpperCamelCase : int = [] while ( line_index < len(SCREAMING_SNAKE_CASE_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): UpperCamelCase : Tuple = lines[line_index] UpperCamelCase : Any = _re_import.search(SCREAMING_SNAKE_CASE_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 UpperCamelCase : Any = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(SCREAMING_SNAKE_CASE_ ): # If the line is an if is_backend_available, we grab all objects associated. 
UpperCamelCase : Optional[Any] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: UpperCamelCase : Dict = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 UpperCamelCase : Dict = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): UpperCamelCase : Optional[Any] = lines[line_index] UpperCamelCase : str = _re_import.search(SCREAMING_SNAKE_CASE_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 1_2 ): objects.append(line[1_2:-2] ) line_index += 1 UpperCamelCase : str = objects else: line_index += 1 return import_dict_objects, type_hint_objects def a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ): """simple docstring""" def find_duplicates(SCREAMING_SNAKE_CASE_ : Any ): return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] UpperCamelCase : Dict = [] for key in import_dict_objects.keys(): UpperCamelCase : Union[str, Any] = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) UpperCamelCase : Dict = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): UpperCamelCase : List[str] = '''base imports''' if key == '''none''' else F"""{key} backend""" errors.append(F"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def a ( ): """simple docstring""" UpperCamelCase : Any = [] for root, _, files in os.walk(SCREAMING_SNAKE_CASE_ ): if "__init__.py" in files: UpperCamelCase : int = os.path.join(SCREAMING_SNAKE_CASE_ , '''__init__.py''' ) UpperCamelCase : Optional[int] = parse_init(SCREAMING_SNAKE_CASE_ ) if objects is not None: UpperCamelCase : str = analyze_results(*SCREAMING_SNAKE_CASE_ ) if len(SCREAMING_SNAKE_CASE_ ) > 0: UpperCamelCase : List[Any] = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append('''\n'''.join(SCREAMING_SNAKE_CASE_ ) ) if len(SCREAMING_SNAKE_CASE_ ) > 0: raise ValueError('''\n\n'''.join(SCREAMING_SNAKE_CASE_ ) ) def a ( ): """simple docstring""" UpperCamelCase : Dict = [] for path, directories, files in os.walk(SCREAMING_SNAKE_CASE_ ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(SCREAMING_SNAKE_CASE_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(SCREAMING_SNAKE_CASE_ ) / folder).glob('''*.py''' ) ) ) == 0: continue UpperCamelCase : List[str] = str((Path(SCREAMING_SNAKE_CASE_ ) / folder).relative_to(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase : str = short_path.replace(os.path.sep , 
'''.''' )
            submodules.append(SCREAMING_SNAKE_CASE_ )
        for fname in files:
            if fname == "__init__.py":
                continue
            UpperCamelCase : Tuple = str((Path(SCREAMING_SNAKE_CASE_ ) / fname).relative_to(SCREAMING_SNAKE_CASE_ ) )
            UpperCamelCase : int = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
            if len(submodule.split('''.''' ) ) == 1:
                submodules.append(SCREAMING_SNAKE_CASE_ )
    return submodules


__UpperCAmelCase : Optional[int] = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def a ( ):
    """simple docstring"""
    from transformers.utils import direct_transformers_import

    UpperCamelCase : Tuple = direct_transformers_import(SCREAMING_SNAKE_CASE_ )

    UpperCamelCase : str = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(SCREAMING_SNAKE_CASE_ , '''__init__.py''' ) , '''r''' ) as f:
        UpperCamelCase : List[Any] = f.read()
    import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , SCREAMING_SNAKE_CASE_ ) ) )

    UpperCamelCase : Union[str, Any] = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(SCREAMING_SNAKE_CASE_ ) > 0:
        UpperCamelCase : str = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
        raise ValueError(
            '''The following submodules are not properly registered in the main init of Transformers:\n'''
            F"""{list_of_modules}\n"""
            '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
315
from argparse import ArgumentParser from .env import EnvironmentCommand def a ( ): """simple docstring""" UpperCamelCase : Any = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' ) UpperCamelCase : Tuple = parser.add_subparsers(help='''diffusers-cli command helpers''' ) # Register commands EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE_ ) # Let's go UpperCamelCase : List[Any] = parser.parse_args() if not hasattr(SCREAMING_SNAKE_CASE_ , '''func''' ): parser.print_help() exit(1 ) # Run UpperCamelCase : str = args.func(SCREAMING_SNAKE_CASE_ ) service.run() if __name__ == "__main__": main()
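# --- Editor's note (an addition) ---
# Only EnvironmentCommand is registered on the subparser above, so the one working
# invocation of this CLI is:
#
#   diffusers-cli env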
315
1
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=_a) class UpperCAmelCase_ ( _a): '''simple docstring''' # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization __UpperCamelCase : str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True}) __UpperCamelCase : ClassVar[Features] = Features({"text": Value("string")}) __UpperCamelCase : ClassVar[Features] = Features({"labels": ClassLabel}) __UpperCamelCase : str = "text" __UpperCamelCase : str = "labels" def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if self.label_column not in features: raise ValueError(f"""Column {self.label_column} is not present in features.""" ) if not isinstance(features[self.label_column] , __SCREAMING_SNAKE_CASE ): raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" ) UpperCamelCase : int = copy.deepcopy(self ) UpperCamelCase : Tuple = self.label_schema.copy() UpperCamelCase : List[Any] = features[self.label_column] UpperCamelCase : Optional[Any] = label_schema return task_template @property def _lowercase ( self ): """simple docstring""" return { self.text_column: "text", self.label_column: "labels", }
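# --- Editor's sketch (an addition; names refer to the un-obfuscated `datasets`
# class, where this template is `TextClassification` and the method above is
# `align_with_features`). Aligning the template against a concrete schema:
#
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   task = TextClassification(text_column="text", label_column="labels")
#   task = task.align_with_features(features)   # copies the dataset's ClassLabel into the template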
315
def a ( SCREAMING_SNAKE_CASE_ : str ): """simple docstring""" return "".join(chr(ord(SCREAMING_SNAKE_CASE_ ) - 3_2 ) if '''a''' <= char <= '''z''' else char for char in word ) if __name__ == "__main__": from doctest import testmod testmod()
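# --- Editor's illustration (an addition) ---
# Un-obfuscated, the helper uppercases ASCII letters by shifting them down 32 code
# points ('a' is 97, 'A' is 65) and passes every other character through:
#
#   to_upper_case("hello World_123")  -> "HELLO WORLD_123"
#
# (The function name is assumed; the def above was renamed to `a` and its loop
# variable scrambled, so the snippet describes the original module's behaviour.)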
315
1
import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor __UpperCAmelCase : Optional[int] = random.Random() def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str]=1.0 , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : Tuple=None ): """simple docstring""" if rng is None: UpperCamelCase : Any = global_rng UpperCamelCase : Union[str, Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=2_000 , __SCREAMING_SNAKE_CASE=24 , __SCREAMING_SNAKE_CASE=24 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=16_000 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , ): """simple docstring""" UpperCamelCase : Optional[Any] = parent UpperCamelCase : Union[str, Any] = batch_size UpperCamelCase : List[Any] = min_seq_length UpperCamelCase : str = max_seq_length UpperCamelCase : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCamelCase : Optional[Any] = feature_size UpperCamelCase : Dict = num_mel_bins UpperCamelCase : List[str] = padding_value UpperCamelCase : str = sampling_rate UpperCamelCase : List[str] = return_attention_mask UpperCamelCase : Optional[Any] = do_normalize def _lowercase ( self ): """simple docstring""" return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def _lowercase ( self , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False ): """simple docstring""" def _flatten(__SCREAMING_SNAKE_CASE ): return list(itertools.chain(*__SCREAMING_SNAKE_CASE ) ) if equal_length: UpperCamelCase : Optional[int] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size UpperCamelCase : Dict = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: UpperCamelCase : List[Any] = [np.asarray(__SCREAMING_SNAKE_CASE ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCAmelCase_ ( _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : Tuple = SpeechaTextFeatureExtractor if is_speech_available() else None def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = SpeechaTextFeatureExtractionTester(self ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" self.assertTrue(np.all(np.mean(__SCREAMING_SNAKE_CASE , axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(__SCREAMING_SNAKE_CASE , axis=0 ) - 1 ) < 1e-3 ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 UpperCamelCase : Union[str, Any] = [floats_list((1, x) )[0] 
for x in range(800 , 1_400 , 200 )] UpperCamelCase : Tuple = [np.asarray(__SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs] # Test feature size UpperCamelCase : Tuple = feature_extractor(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors='''np''' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size ) # Test not batched input UpperCamelCase : Any = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features UpperCamelCase : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # Test batched UpperCamelCase : int = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' ).input_features UpperCamelCase : Optional[int] = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. UpperCamelCase : List[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)] UpperCamelCase : Optional[Any] = np.asarray(__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' ).input_features UpperCamelCase : Optional[Any] = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] UpperCamelCase : int = ['''longest''', '''max_length''', '''do_not_pad'''] UpperCamelCase : str = [None, 16, None] for max_length, padding in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): UpperCamelCase : Dict = feature_extractor( __SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = inputs.input_features UpperCamelCase : Any = inputs.attention_mask UpperCamelCase : Optional[Any] = [np.sum(__SCREAMING_SNAKE_CASE ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] UpperCamelCase : Optional[Any] = ['''longest''', '''max_length''', '''do_not_pad'''] UpperCamelCase : Dict = [None, 16, None] for max_length, padding in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): UpperCamelCase : str = feature_extractor( __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors='''np''' , return_attention_mask=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = inputs.input_features UpperCamelCase : 
str = inputs.attention_mask UpperCamelCase : Union[str, Any] = [np.sum(__SCREAMING_SNAKE_CASE ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] UpperCamelCase : List[Any] = feature_extractor( __SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=4 , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''np''' , return_attention_mask=__SCREAMING_SNAKE_CASE , ) UpperCamelCase : Union[str, Any] = inputs.input_features UpperCamelCase : Dict = inputs.attention_mask UpperCamelCase : List[str] = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1] ) self._check_zero_mean_unit_variance(input_features[2] ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] UpperCamelCase : Optional[Any] = feature_extractor( __SCREAMING_SNAKE_CASE , padding='''longest''' , max_length=4 , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''np''' , return_attention_mask=__SCREAMING_SNAKE_CASE , ) UpperCamelCase : Optional[int] = inputs.input_features UpperCamelCase : Any = inputs.attention_mask UpperCamelCase : Optional[int] = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 4, 24) ) UpperCamelCase : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] UpperCamelCase : Any = feature_extractor( __SCREAMING_SNAKE_CASE , padding='''longest''' , max_length=16 , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''np''' , return_attention_mask=__SCREAMING_SNAKE_CASE , ) UpperCamelCase : List[str] = inputs.input_features UpperCamelCase : Union[str, Any] = inputs.attention_mask UpperCamelCase : Dict = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 6, 24) ) def _lowercase ( self ): """simple docstring""" import torch UpperCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Any = np.random.rand(100 , 32 ).astype(np.floataa ) UpperCamelCase : Tuple = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: UpperCamelCase : Optional[Any] = 
feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) UpperCamelCase : List[Any] = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" from datasets import load_dataset UpperCamelCase : Tuple = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech UpperCamelCase : Optional[Any] = ds.sort('''id''' ).select(range(__SCREAMING_SNAKE_CASE ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = np.array([ -1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241, -1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128, -1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625, ] ) # fmt: on UpperCamelCase : Dict = self._load_datasamples(1 ) UpperCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : List[str] = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).input_features self.assertEquals(input_features.shape , (1, 584, 24) ) self.assertTrue(np.allclose(input_features[0, 0, :30] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
315
import math


def a ( SCREAMING_SNAKE_CASE_ : int ):
    """simple docstring"""
    assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    UpperCamelCase : Union[str, Any] = range(3 , int(math.sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )


def a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple=1 , **SCREAMING_SNAKE_CASE_ : Tuple ):
    """simple docstring"""
    UpperCamelCase : Tuple = factor * value
    UpperCamelCase : Optional[int] = value

    while not is_prime(SCREAMING_SNAKE_CASE_ ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1 , **SCREAMING_SNAKE_CASE_ )
    return value
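# --- Editor's illustration (an addition) ---
# Both defs above were renamed to `a`, but their bodies still call the original
# names, so the expected behaviour of the un-obfuscated pair is:
#
#   is_prime(13)    -> True
#   next_prime(13)  -> 17   # 13 is already prime, so the search restarts from 14
#   next_prime(14)  -> 17   # 14, 15, 16 fail the primality test; 17 passes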
315
1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase : Tuple = logging.get_logger(__name__) __UpperCAmelCase : Any = { "facebook/deit-base-distilled-patch16-224": ( "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json" ), # See all DeiT models at https://huggingface.co/models?filter=deit } class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : List[Any] = "deit" def __init__( self , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=224 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=16 , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = hidden_size UpperCamelCase : Optional[Any] = num_hidden_layers UpperCamelCase : List[Any] = num_attention_heads UpperCamelCase : Union[str, Any] = intermediate_size UpperCamelCase : Union[str, Any] = hidden_act UpperCamelCase : Optional[Any] = hidden_dropout_prob UpperCamelCase : Tuple = attention_probs_dropout_prob UpperCamelCase : Optional[int] = initializer_range UpperCamelCase : Any = layer_norm_eps UpperCamelCase : List[Any] = image_size UpperCamelCase : Dict = patch_size UpperCamelCase : Dict = num_channels UpperCamelCase : Any = qkv_bias UpperCamelCase : Tuple = encoder_stride class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : Optional[Any] = version.parse("1.11") @property def _lowercase ( self ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _lowercase ( self ): """simple docstring""" return 1e-4
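# --- Editor's sketch (an addition; class names refer to the un-obfuscated module,
# where the pair is DeiTConfig / DeiTOnnxConfig). A typical export pairing:
#
#   config = DeiTConfig()                # defaults above: 224px images, 16px patches
#   onnx_config = DeiTOnnxConfig(config)
#   onnx_config.inputs                   # OrderedDict: pixel_values -> batch/channel/height/width axes
#   onnx_config.atol_for_validation      # 1e-4, per the second property above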
315
import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor __UpperCAmelCase : Optional[int] = logging.get_logger(__name__) class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): """simple docstring""" warnings.warn( '''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use ImageGPTImageProcessor instead.''' , __SCREAMING_SNAKE_CASE , ) super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
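# --- Editor's note (an addition) ---
# The shim above only emits a deprecation warning before deferring to
# ImageGPTImageProcessor, so new code should construct the processor directly
# (the checkpoint name is illustrative):
#
#   from transformers import ImageGPTImageProcessor
#   processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")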
315
1
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device __UpperCAmelCase : Optional[int] = False class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' pass @nightly @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' def _lowercase ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self ): """simple docstring""" UpperCamelCase : Tuple = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' ) # remove text_unet pipe.remove_unused_weights() pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = '''A painting of a squirrel eating a burger ''' UpperCamelCase : List[str] = torch.manual_seed(0 ) UpperCamelCase : Optional[int] = pipe( prompt=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = VersatileDiffusionTextToImagePipeline.from_pretrained(__SCREAMING_SNAKE_CASE ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = generator.manual_seed(0 ) UpperCamelCase : Optional[int] = pipe( prompt=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = VersatileDiffusionTextToImagePipeline.from_pretrained( '''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = '''A painting of a squirrel eating a burger ''' UpperCamelCase : int = torch.manual_seed(0 ) UpperCamelCase : Union[str, Any] = pipe( prompt=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images UpperCamelCase : List[str] = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) UpperCamelCase : Optional[int] = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
315
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=18 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , ): """simple docstring""" UpperCamelCase : List[str] = size if size is not None else {'''height''': 18, '''width''': 18} UpperCamelCase : int = parent UpperCamelCase : List[Any] = batch_size UpperCamelCase : Optional[int] = num_channels UpperCamelCase : Union[str, Any] = image_size UpperCamelCase : Union[str, Any] = min_resolution UpperCamelCase : Tuple = max_resolution UpperCamelCase : List[str] = do_resize UpperCamelCase : List[str] = size UpperCamelCase : int = apply_ocr def _lowercase ( self ): """simple docstring""" return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class UpperCAmelCase_ ( _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = LayoutLMvaImageProcessingTester(self ) @property def _lowercase ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''apply_ocr''' ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} ) UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) def _lowercase ( self ): """simple docstring""" pass def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input UpperCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) self.assertIsInstance(encoding.words , __SCREAMING_SNAKE_CASE ) self.assertIsInstance(encoding.boxes , __SCREAMING_SNAKE_CASE ) # Test batched UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values 
self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input UpperCamelCase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched UpperCamelCase : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input UpperCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched UpperCamelCase : Optional[int] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = LayoutLMvaImageProcessor() from datasets import load_dataset UpperCamelCase : Dict = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' ) UpperCamelCase : List[Any] = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 UpperCamelCase : Union[str, Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', 
'''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 UpperCamelCase : str = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 
391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , __SCREAMING_SNAKE_CASE ) self.assertListEqual(encoding.boxes , __SCREAMING_SNAKE_CASE ) # with apply_OCR = False UpperCamelCase : Optional[Any] = LayoutLMvaImageProcessor(apply_ocr=__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
315
1
import torch from transformers import AutoModel class UpperCAmelCase_ ( torch.nn.Module): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE="sayef/fsner-bert-base-uncased" ): """simple docstring""" super(__SCREAMING_SNAKE_CASE , self ).__init__() UpperCamelCase : Dict = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = torch.nn.CosineSimilarity(3 , 1e-08 ) UpperCamelCase : int = torch.nn.Softmax(dim=1 ) def _lowercase ( self , **__SCREAMING_SNAKE_CASE ): """simple docstring""" return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1 ): """simple docstring""" return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Any = W_supports['''sizes'''].tolist() UpperCamelCase : Optional[int] = W_supports['''start_token_id'''].item() UpperCamelCase : Dict = W_supports['''end_token_id'''].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] UpperCamelCase : Optional[Any] = self.BERT(**__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = self.BERT(**__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = None UpperCamelCase : Dict = None UpperCamelCase : str = W_supports['''input_ids'''] == start_token_id UpperCamelCase : Union[str, Any] = W_supports['''input_ids'''] == end_token_id for i, size in enumerate(__SCREAMING_SNAKE_CASE ): if i == 0: UpperCamelCase : int = 0 else: UpperCamelCase : Tuple = support_sizes[i - 1] UpperCamelCase : Tuple = S[s : s + size][start_token_masks[s : s + size]] UpperCamelCase : int = S[s : s + size][end_token_masks[s : s + size]] UpperCamelCase : Union[str, Any] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) UpperCamelCase : Union[str, Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: UpperCamelCase : Optional[Any] = torch.vstack((p_starts, p_start) ) UpperCamelCase : List[str] = torch.vstack((p_ends, p_end) ) else: UpperCamelCase : Optional[int] = p_start UpperCamelCase : Any = p_end return p_starts, p_ends
315
import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def a ( SCREAMING_SNAKE_CASE_ : dict ): """simple docstring""" return (data["data"], data["target"]) def a ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray ): """simple docstring""" UpperCamelCase : Optional[Any] = XGBRegressor(verbosity=0 , random_state=4_2 ) xgb.fit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Predict target for test data UpperCamelCase : Any = xgb.predict(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = predictions.reshape(len(SCREAMING_SNAKE_CASE_ ) , 1 ) return predictions def a ( ): """simple docstring""" UpperCamelCase : Tuple = fetch_california_housing() UpperCamelCase , UpperCamelCase : Tuple = data_handling(SCREAMING_SNAKE_CASE_ ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = train_test_split( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , test_size=0.25 , random_state=1 ) UpperCamelCase : Optional[Any] = xgboost(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Error printing print(F"""Mean Absolute Error : {mean_absolute_error(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}""" ) print(F"""Mean Square Error : {mean_squared_error(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}""" ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
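# --- Editor's note (an addition) ---
# The script is self-contained: main() fetches the California-housing data, takes a
# 75/25 train/test split (test_size=0.25), fits an XGBRegressor, and prints MAE and
# MSE, so running the file directly (filename assumed) is the whole workflow:
#
#   python xgboost_regressor.py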
315
1
import requests from bsa import BeautifulSoup def a ( SCREAMING_SNAKE_CASE_ : str = "https://www.worldometers.info/coronavirus" ): """simple docstring""" UpperCamelCase : Optional[int] = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE_ ).text , '''html.parser''' ) UpperCamelCase : Dict = soup.findAll('''h1''' ) UpperCamelCase : Any = soup.findAll('''div''' , {'''class''': '''maincounter-number'''} ) keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} ) values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} ) return {key.text.strip(): value.text.strip() for key, value in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )} if __name__ == "__main__": print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n") for key, value in world_covidaa_stats().items(): print(f'''{key}\n{value}\n''')
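# --- Editor's note (an addition) ---
# `bsa` here corresponds to `bs4` (BeautifulSoup) in the un-obfuscated source. The
# scraper pairs the page's <h1>/<span> headings with their counter <div>s and
# returns a dict shaped like (keys illustrative):
#
#   {"Coronavirus Cases:": "...", "Deaths:": "...", "Recovered:": "..."}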
315
__UpperCAmelCase : str = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n" __UpperCAmelCase : Dict = [{"type": "code", "content": INSTALL_CONTENT}] __UpperCAmelCase : Union[str, Any] = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
315
1
import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor __UpperCAmelCase : Optional[int] = logging.get_logger(__name__) class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): """simple docstring""" warnings.warn( '''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use ImageGPTImageProcessor instead.''' , __SCREAMING_SNAKE_CASE , ) super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
315
import collections
import os
import re
from pathlib import Path

PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """
    Check all inits in the transformers repo and raise an error if at least one does not define the same objects in
    both halves.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """
    Returns the list of Transformers submodules.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
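For reference, the init layout that parse_init walks looks roughly like the minimal sketch below. The module and object names (configuration_foo, FooModel) are hypothetical, chosen only to illustrate the two mirrored halves the checker compares:

# Hypothetical __init__.py illustrating the layout parse_init() expects.
from typing import TYPE_CHECKING

from .utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Half 1: the lazy _import_structure, keyed by submodule.
_import_structure = {"configuration_foo": ["FooConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_foo"] = ["FooModel"]

# Half 2: the TYPE_CHECKING mirror; the checker asserts both halves list the same objects.
if TYPE_CHECKING:
    from .configuration_foo import FooConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_foo import FooModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)

The checker's job is exactly to assert that every name registered in _import_structure also appears under TYPE_CHECKING (per backend), and vice versa.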
315
1
import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class UpperCAmelCase_ ( _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : int = ShapEImgaImgPipeline __UpperCamelCase : List[Any] = ["image"] __UpperCamelCase : Dict = ["image"] __UpperCamelCase : Any = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] __UpperCamelCase : Dict = False @property def _lowercase ( self ): """simple docstring""" return 32 @property def _lowercase ( self ): """simple docstring""" return 32 @property def _lowercase ( self ): """simple docstring""" return self.time_input_dim * 4 @property def _lowercase ( self ): """simple docstring""" return 8 @property def _lowercase ( self ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase : Union[str, Any] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) UpperCamelCase : List[Any] = CLIPVisionModel(__SCREAMING_SNAKE_CASE ) return model @property def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = CLIPImageProcessor( crop_size=224 , do_center_crop=__SCREAMING_SNAKE_CASE , do_normalize=__SCREAMING_SNAKE_CASE , do_resize=__SCREAMING_SNAKE_CASE , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , ) return image_processor @property def _lowercase ( self ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase : Union[str, Any] = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } UpperCamelCase : List[str] = PriorTransformer(**__SCREAMING_SNAKE_CASE ) return model @property def _lowercase ( self ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase : str = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } UpperCamelCase : Dict = ShapERenderer(**__SCREAMING_SNAKE_CASE ) return model def _lowercase ( self ): """simple docstring""" UpperCamelCase : int = self.dummy_prior UpperCamelCase : Optional[Any] = self.dummy_image_encoder UpperCamelCase : Any = self.dummy_image_processor UpperCamelCase : List[str] = self.dummy_renderer UpperCamelCase : int = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=1_024 , prediction_type='''sample''' , 
use_karras_sigmas=__SCREAMING_SNAKE_CASE , clip_sample=__SCREAMING_SNAKE_CASE , clip_sample_range=1.0 , ) UpperCamelCase : Any = { '''prior''': prior, '''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ): """simple docstring""" UpperCamelCase : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ): UpperCamelCase : Dict = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: UpperCamelCase : List[Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = { '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = '''cpu''' UpperCamelCase : Dict = self.get_dummy_components() UpperCamelCase : List[Any] = self.pipeline_class(**__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) ) UpperCamelCase : Tuple = output.images[0] UpperCamelCase : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) UpperCamelCase : str = np.array( [ 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _lowercase ( self ): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = torch_device == '''cpu''' UpperCamelCase : int = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=__SCREAMING_SNAKE_CASE , relax_max_difference=__SCREAMING_SNAKE_CASE , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[Any] = self.get_dummy_components() UpperCamelCase : Union[str, Any] = self.pipeline_class(**__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = 1 UpperCamelCase : int = 2 UpperCamelCase : Union[str, Any] = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) for key in inputs.keys(): if key in self.batch_params: UpperCamelCase : int = batch_size * [inputs[key]] UpperCamelCase : Tuple = pipe(**__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' def _lowercase ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self ): """simple docstring""" UpperCamelCase : Tuple = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) UpperCamelCase : Union[str, Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) UpperCamelCase : str = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) UpperCamelCase : Optional[int] = 
pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 ) UpperCamelCase : Union[str, Any] = pipe( __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
315
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """
    A Pangram String contains all the alphabets at least once.
    >>> is_pangram("The quick brown fox jumps over the lazy dog")
    True
    >>> is_pangram("My name is Unknown")
    False
    """
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """
    >>> is_pangram_faster("The quick brown fox jumps over the lazy dog")
    True
    >>> is_pangram_faster("Hello World")
    False
    """
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """
    >>> is_pangram_fastest("The quick brown fox jumps over the lazy dog")
    True
    >>> is_pangram_fastest("Hello World")
    False
    """
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """
    Benchmark code comparing the different versions.
    """
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
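The three variants trade clarity for speed: the fastest pushes all per-character work into a single set comprehension evaluated at C level. A minimal sketch for timing one of them on your own sentence (the sentence below is just another well-known pangram):

from timeit import timeit

sentence = "Pack my box with five dozen liquor jugs"
print(timeit(lambda: is_pangram_fastest(sentence), number=100_000))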
315
1
import math
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput


class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
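To make the control flow concrete, here is a hedged usage sketch of the scheduler's intended loop. The `model` function is a hypothetical stand-in for a trained denoising network, not part of this module:

import torch

# Hypothetical stand-in for a trained denoising model.
def model(sample: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
    return torch.zeros_like(sample)

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=50)

sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_output = model(scheduler.scale_model_input(sample, t), t)
    # Each step blends up to pndm_order previous epsilon estimates (Adams-Bashforth style).
    sample = scheduler.step(model_output, t, sample).prev_sample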
315
import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) __UpperCAmelCase : Union[str, Any] = logging.getLogger() def a ( ): """simple docstring""" UpperCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument('''-f''' ) UpperCamelCase : List[str] = parser.parse_args() return args.f class UpperCAmelCase_ ( _a): '''simple docstring''' def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = logging.StreamHandler(sys.stdout ) logger.addHandler(__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Dict = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , '''run_glue_deebert.py''' ) with patch.object(__SCREAMING_SNAKE_CASE , '''argv''' , __SCREAMING_SNAKE_CASE ): UpperCamelCase : int = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(__SCREAMING_SNAKE_CASE , 0.666 ) @slow @require_torch_non_multi_gpu def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = ''' --model_type roberta --model_name_or_path roberta-base --task_name MRPC --do_train --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --max_seq_length 128 --per_gpu_eval_batch_size=1 --per_gpu_train_batch_size=8 --learning_rate 2e-4 --num_train_epochs 3 --overwrite_output_dir --seed 42 --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --save_steps 0 --overwrite_cache --eval_after_first_stage '''.split() self.run_and_check(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = ''' --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --eval_each_highway --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 '''.split() self.run_and_check(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = ''' --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --early_exit_entropy 0.1 --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 '''.split() self.run_and_check(__SCREAMING_SNAKE_CASE )
315
1
import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCAmelCase_ : '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=0.6 , __SCREAMING_SNAKE_CASE=None , ): """simple docstring""" UpperCamelCase : Tuple = parent UpperCamelCase : Dict = batch_size UpperCamelCase : List[str] = image_size UpperCamelCase : Any = patch_size UpperCamelCase : Optional[Any] = num_channels UpperCamelCase : Union[str, Any] = is_training UpperCamelCase : Optional[int] = use_labels UpperCamelCase : str = hidden_size UpperCamelCase : Any = num_hidden_layers UpperCamelCase : int = num_attention_heads UpperCamelCase : Union[str, Any] = intermediate_size UpperCamelCase : int = hidden_act UpperCamelCase : Union[str, Any] = hidden_dropout_prob UpperCamelCase : Optional[Any] = attention_probs_dropout_prob UpperCamelCase : int = type_sequence_label_size UpperCamelCase : Tuple = initializer_range UpperCamelCase : List[Any] = mask_ratio UpperCamelCase : Dict = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) UpperCamelCase : Any = (image_size // patch_size) ** 2 UpperCamelCase : Optional[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase : Any = None if self.use_labels: UpperCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase : Optional[int] = self.get_config() return config, pixel_values, labels def _lowercase ( self ): """simple docstring""" return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : int = 
ViTMAEModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : Dict = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : List[str] = ViTMAEForPreTraining(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : Dict = model(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = (self.image_size // self.patch_size) ** 2 UpperCamelCase : Dict = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images UpperCamelCase : str = 1 UpperCamelCase : int = ViTMAEForPreTraining(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase : str = model(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = config_and_inputs UpperCamelCase : Dict = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a, _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : int = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () __UpperCamelCase : Optional[int] = {"feature-extraction": ViTMAEModel} if is_torch_available() else {} __UpperCamelCase : Tuple = False __UpperCamelCase : Any = False __UpperCamelCase : Union[str, Any] = False __UpperCamelCase : Tuple = False def _lowercase ( self ): """simple docstring""" UpperCamelCase : int = ViTMAEModelTester(self ) UpperCamelCase : str = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def _lowercase ( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMAE does not use inputs_embeds''' ) def _lowercase ( self ): """simple docstring""" pass def _lowercase ( self ): """simple docstring""" UpperCamelCase , UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Optional[Any] = model_class(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCamelCase : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase , UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Tuple = model_class(__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase : Tuple = [*signature.parameters.keys()] UpperCamelCase : Dict = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" np.random.seed(2 ) UpperCamelCase : Optional[Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 ) UpperCamelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCamelCase : Any = torch.from_numpy(__SCREAMING_SNAKE_CASE ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument UpperCamelCase : Tuple = pt_noise super().check_pt_tf_models(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): UpperCamelCase : int = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) UpperCamelCase : Dict = outputs[0].cpu().numpy() UpperCamelCase : Optional[Any] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = model_class.from_pretrained(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): UpperCamelCase : Optional[Any] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) # Make sure we don't have nans UpperCamelCase : str = after_outputs[0].cpu().numpy() UpperCamelCase : List[str] = 0 UpperCamelCase : Dict = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1e-5 ) @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. 
See test_save_load''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def _lowercase ( self ): """simple docstring""" pass @slow def _lowercase ( self ): """simple docstring""" for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : Any = ViTMAEModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def a ( ): """simple docstring""" UpperCamelCase : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' @cached_property def _lowercase ( self ): """simple docstring""" return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None @slow def _lowercase ( self ): """simple docstring""" np.random.seed(2 ) UpperCamelCase : int = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = self.default_image_processor UpperCamelCase : str = prepare_img() UpperCamelCase : Optional[int] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) UpperCamelCase : int = ViTMAEConfig() UpperCamelCase : Any = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) UpperCamelCase : Union[str, Any] = np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): UpperCamelCase : Optional[Any] = model(**__SCREAMING_SNAKE_CASE , noise=torch.from_numpy(__SCREAMING_SNAKE_CASE ).to(device=__SCREAMING_SNAKE_CASE ) ) # verify the logits UpperCamelCase : Dict = torch.Size((1, 196, 768) ) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE ) UpperCamelCase : int = torch.tensor( [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__SCREAMING_SNAKE_CASE ) , atol=1e-4 ) )
315
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase : Tuple = logging.get_logger(__name__) __UpperCAmelCase : Union[str, Any] = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : List[Any] = "ibert" def __init__( self , __SCREAMING_SNAKE_CASE=30_522 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE="absolute" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="none" , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = vocab_size UpperCamelCase : Optional[int] = hidden_size UpperCamelCase : Tuple = num_hidden_layers UpperCamelCase : Optional[Any] = num_attention_heads UpperCamelCase : Dict = hidden_act UpperCamelCase : Union[str, Any] = intermediate_size UpperCamelCase : str = hidden_dropout_prob UpperCamelCase : Any = attention_probs_dropout_prob UpperCamelCase : Dict = max_position_embeddings UpperCamelCase : Union[str, Any] = type_vocab_size UpperCamelCase : Optional[Any] = initializer_range UpperCamelCase : Union[str, Any] = layer_norm_eps UpperCamelCase : Dict = position_embedding_type UpperCamelCase : int = quant_mode UpperCamelCase : Any = force_dequant class UpperCAmelCase_ ( _a): '''simple docstring''' @property def _lowercase ( self ): """simple docstring""" if self.task == "multiple-choice": UpperCamelCase : int = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: UpperCamelCase : Optional[int] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
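As a brief usage sketch (assuming the standard transformers package exports), the config mirrors RoBERTa-base defaults, with quant_mode switching on integer-only arithmetic and force_dequant selectively disabling quantization per operation:

from transformers import IBertConfig, IBertModel

# quant_mode=True enables integer-only inference; "none" keeps all ops quantized.
config = IBertConfig(quant_mode=True, force_dequant="none")
model = IBertModel(config)
print(config.hidden_size, config.num_hidden_layers)  # 768 12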
315
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase : int = logging.get_logger(__name__) __UpperCAmelCase : Optional[int] = { "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json", "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json", "uclanlp/visualbert-vqa-coco-pre": ( "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json" ), "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json", "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json", "uclanlp/visualbert-vcr-coco-pre": ( "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json" ), "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json", "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json", "uclanlp/visualbert-nlvr2-coco-pre": ( "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json" ) # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert } class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : Any = "visual_bert" def __init__( self , __SCREAMING_SNAKE_CASE=30_522 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=2 , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = vocab_size UpperCamelCase : Union[str, Any] = max_position_embeddings UpperCamelCase : Optional[Any] = hidden_size UpperCamelCase : Optional[int] = visual_embedding_dim UpperCamelCase : str = num_hidden_layers UpperCamelCase : Optional[int] = num_attention_heads UpperCamelCase : Tuple = intermediate_size UpperCamelCase : Optional[int] = hidden_act UpperCamelCase : Tuple = hidden_dropout_prob UpperCamelCase : List[Any] = attention_probs_dropout_prob UpperCamelCase : Tuple = initializer_range UpperCamelCase : Tuple = type_vocab_size UpperCamelCase : List[str] = layer_norm_eps UpperCamelCase : Tuple = bypass_transformer UpperCamelCase : Any = special_visual_initialize
315
import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup __UpperCAmelCase : int = logging.get_logger(__name__) class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , **__SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(self , ['''bs4'''] ) super().__init__(**__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : List[Any] = [] UpperCamelCase : int = [] UpperCamelCase : List[Any] = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag UpperCamelCase : Tuple = parent.find_all(child.name , recursive=__SCREAMING_SNAKE_CASE ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(__SCREAMING_SNAKE_CASE ) else next(i for i, s in enumerate(__SCREAMING_SNAKE_CASE , 1 ) if s is child ) ) UpperCamelCase : Optional[Any] = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[Any] = BeautifulSoup(__SCREAMING_SNAKE_CASE , '''html.parser''' ) UpperCamelCase : Union[str, Any] = [] UpperCamelCase : List[str] = [] UpperCamelCase : str = [] for element in html_code.descendants: if type(__SCREAMING_SNAKE_CASE ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue UpperCamelCase : Any = html.unescape(__SCREAMING_SNAKE_CASE ).strip() if not text_in_this_tag: continue all_doc_strings.append(__SCREAMING_SNAKE_CASE ) UpperCamelCase , UpperCamelCase : int = self.xpath_soup(__SCREAMING_SNAKE_CASE ) stringaxtag_seq.append(__SCREAMING_SNAKE_CASE ) stringaxsubs_seq.append(__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[Any] = '''''' for tagname, subs in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): xpath += f"""/{tagname}""" if subs != 0: xpath += f"""[{subs}]""" return xpath def __call__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : int = False # Check that strings has a valid type if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): UpperCamelCase : Optional[Any] = True elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ): if len(__SCREAMING_SNAKE_CASE ) == 0 or isinstance(html_strings[0] , __SCREAMING_SNAKE_CASE ): UpperCamelCase : List[str] = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' f"""but is of type {type(__SCREAMING_SNAKE_CASE )}.""" ) UpperCamelCase : int = bool(isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(html_strings[0] , __SCREAMING_SNAKE_CASE )) ) if not is_batched: UpperCamelCase : Union[str, Any] = [html_strings] # Get nodes + xpaths UpperCamelCase : str = [] UpperCamelCase : int = [] for html_string in html_strings: UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = self.get_three_from_single(__SCREAMING_SNAKE_CASE ) 
nodes.append(__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = [] for node, tag_list, sub_list in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): UpperCamelCase : str = self.construct_xpath(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) xpath_strings.append(__SCREAMING_SNAKE_CASE ) xpaths.append(__SCREAMING_SNAKE_CASE ) # return as Dict UpperCamelCase : List[str] = {'''nodes''': nodes, '''xpaths''': xpaths} UpperCamelCase : List[Any] = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE ) return encoded_inputs
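This extractor corresponds to MarkupLM's HTML feature extractor in transformers. A hedged end-to-end sketch (it requires bs4 to be installed; the HTML snippet is made up for illustration, and the expected outputs follow from the xpath-subscript logic above):

from transformers import MarkupLMFeatureExtractor

extractor = MarkupLMFeatureExtractor()
html = "<html><body><div><p>Hello</p><p>World</p></div></body></html>"
encoding = extractor(html)
print(encoding["nodes"])   # expected: [['Hello', 'World']]
print(encoding["xpaths"])  # expected: [['/html/body/div/p[1]', '/html/body/div/p[2]']]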
315
1
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version __UpperCAmelCase : Optional[int] = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") @dataclass class UpperCAmelCase_ : '''simple docstring''' __UpperCamelCase : Optional[str] = field( default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}) __UpperCamelCase : Optional[str] = field( default=_a, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}) __UpperCamelCase : Optional[str] = field( default=_a, metadata={"help": "The column name of the images in the files."}) __UpperCamelCase : Optional[str] = field(default=_a, metadata={"help": "A folder containing the training data."}) __UpperCamelCase : Optional[str] = field(default=_a, metadata={"help": "A folder containing the validation data."}) __UpperCamelCase : Optional[float] = field( default=0.1_5, metadata={"help": "Percent to split off of train for validation."}) __UpperCamelCase : Optional[int] = field( default=_a, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) __UpperCamelCase : Optional[int] = field( default=_a, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[Any] = {} if self.train_dir is not None: UpperCamelCase : Union[str, Any] = self.train_dir if self.validation_dir is not None: UpperCamelCase : Any = self.validation_dir UpperCamelCase : List[Any] = data_files if data_files else None @dataclass class UpperCAmelCase_ : '''simple docstring''' __UpperCamelCase : str = field( default=_a, metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) }, ) __UpperCamelCase : Optional[str] = field( default=_a, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}) __UpperCamelCase : Optional[str] = field( default=_a, metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. 
Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) }, ) __UpperCamelCase : Optional[str] = field( default=_a, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}) __UpperCamelCase : str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) __UpperCamelCase : str = field(default=_a, metadata={"help": "Name or path of preprocessor config."}) __UpperCamelCase : bool = field( default=_a, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) __UpperCamelCase : float = field( default=0.7_5, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}) __UpperCamelCase : bool = field( default=_a, metadata={"help": "Whether or not to train with normalized pixel values as target."}) @dataclass class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : float = field( default=1E-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}) def a ( SCREAMING_SNAKE_CASE_ : List[Any] ): """simple docstring""" UpperCamelCase : Any = torch.stack([example['''pixel_values'''] for example in examples] ) return {"pixel_values": pixel_values} def a ( ): """simple docstring""" UpperCamelCase : int = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_mae''' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCamelCase : List[str] = training_args.get_process_log_level() logger.setLevel(SCREAMING_SNAKE_CASE_ ) transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. 
UpperCamelCase : int = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCamelCase : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset. UpperCamelCase : int = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. UpperCamelCase : List[str] = None if '''validation''' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , SCREAMING_SNAKE_CASE_ ) and data_args.train_val_split > 0.0: UpperCamelCase : Optional[int] = ds['''train'''].train_test_split(data_args.train_val_split ) UpperCamelCase : List[str] = split['''train'''] UpperCamelCase : Dict = split['''test'''] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCamelCase : int = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name: UpperCamelCase : List[Any] = ViTMAEConfig.from_pretrained(model_args.config_name , **SCREAMING_SNAKE_CASE_ ) elif model_args.model_name_or_path: UpperCamelCase : Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE_ ) else: UpperCamelCase : List[str] = ViTMAEConfig() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.config_overrides is not None: logger.info(F"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(F"""New config: {config}""" ) # adapt config config.update( { '''mask_ratio''': model_args.mask_ratio, '''norm_pix_loss''': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: UpperCamelCase : List[Any] = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **SCREAMING_SNAKE_CASE_ ) elif model_args.model_name_or_path: UpperCamelCase : Optional[Any] = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE_ ) else: UpperCamelCase : Dict = ViTImageProcessor() # create model if model_args.model_name_or_path: UpperCamelCase : Optional[int] = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('''Training new model from scratch''' ) UpperCamelCase : int = ViTMAEForPreTraining(SCREAMING_SNAKE_CASE_ ) if training_args.do_train: UpperCamelCase : Optional[Any] = 
ds['''train'''].column_names else: UpperCamelCase : Dict = ds['''validation'''].column_names if data_args.image_column_name is not None: UpperCamelCase : Any = data_args.image_column_name elif "image" in column_names: UpperCamelCase : Tuple = '''image''' elif "img" in column_names: UpperCamelCase : Any = '''img''' else: UpperCamelCase : Any = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: UpperCamelCase : Tuple = image_processor.size['''shortest_edge'''] else: UpperCamelCase : Tuple = (image_processor.size['''height'''], image_processor.size['''width''']) UpperCamelCase : List[str] = Compose( [ Lambda(lambda SCREAMING_SNAKE_CASE_ : img.convert('''RGB''' ) if img.mode != "RGB" else img ), RandomResizedCrop(SCREAMING_SNAKE_CASE_ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(SCREAMING_SNAKE_CASE_ : Optional[int] ): UpperCamelCase : str = [transforms(SCREAMING_SNAKE_CASE_ ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('''--do_train requires a train dataset''' ) if data_args.max_train_samples is not None: UpperCamelCase : List[Any] = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(SCREAMING_SNAKE_CASE_ ) if training_args.do_eval: if "validation" not in ds: raise ValueError('''--do_eval requires a validation dataset''' ) if data_args.max_eval_samples is not None: UpperCamelCase : List[str] = ( ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(SCREAMING_SNAKE_CASE_ ) # Compute absolute learning rate UpperCamelCase : Optional[Any] = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: UpperCamelCase : Tuple = training_args.base_learning_rate * total_train_batch_size / 2_5_6 # Initialize our trainer UpperCamelCase : List[str] = Trainer( model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , ) # Training if training_args.do_train: UpperCamelCase : str = None if training_args.resume_from_checkpoint is not None: UpperCamelCase : Dict = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCamelCase : Optional[Any] = last_checkpoint UpperCamelCase : Dict = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_ ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: UpperCamelCase : int = trainer.evaluate() trainer.log_metrics('''eval''' , SCREAMING_SNAKE_CASE_ ) trainer.save_metrics('''eval''' , SCREAMING_SNAKE_CASE_ ) # Write model card and (optionally) push to hub UpperCamelCase : List[str] = { '''tasks''': '''masked-auto-encoding''', '''dataset''': data_args.dataset_name, '''tags''': ['''masked-auto-encoding'''], } if training_args.push_to_hub: 
trainer.push_to_hub(**SCREAMING_SNAKE_CASE_ ) else: trainer.create_model_card(**SCREAMING_SNAKE_CASE_ ) def a ( SCREAMING_SNAKE_CASE_ : List[Any] ): """simple docstring""" main() if __name__ == "__main__": main()
315
import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params __UpperCAmelCase : List[str] = getLogger(__name__) __UpperCAmelCase : Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu" def a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int = 8 , SCREAMING_SNAKE_CASE_ : str = DEFAULT_DEVICE , SCREAMING_SNAKE_CASE_ : List[Any]=False , SCREAMING_SNAKE_CASE_ : int="summarization" , SCREAMING_SNAKE_CASE_ : int=None , **SCREAMING_SNAKE_CASE_ : Any , ): """simple docstring""" UpperCamelCase : Dict = Path(SCREAMING_SNAKE_CASE_ ).open('''w''' , encoding='''utf-8''' ) UpperCamelCase : List[Any] = str(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) if fpaa: UpperCamelCase : List[Any] = model.half() UpperCamelCase : str = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type. UpperCamelCase : int = time.time() # update config with task specific params use_task_specific_params(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if prefix is None: UpperCamelCase : Union[str, Any] = prefix or getattr(model.config , '''prefix''' , '''''' ) or '''''' for examples_chunk in tqdm(list(chunks(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ): UpperCamelCase : Optional[int] = [prefix + text for text in examples_chunk] UpperCamelCase : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , truncation=SCREAMING_SNAKE_CASE_ , padding='''longest''' ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **SCREAMING_SNAKE_CASE_ , ) UpperCamelCase : List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) for hypothesis in dec: fout.write(hypothesis + '''\n''' ) fout.flush() fout.close() UpperCamelCase : str = int(time.time() - start_time ) # seconds UpperCamelCase : Any = len(SCREAMING_SNAKE_CASE_ ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def a ( ): """simple docstring""" return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' ) def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any]=True ): """simple docstring""" UpperCamelCase : int = argparse.ArgumentParser() parser.add_argument('''model_name''' , type=SCREAMING_SNAKE_CASE_ , help='''like facebook/bart-large-cnn,t5-base, etc.''' ) parser.add_argument('''input_path''' , type=SCREAMING_SNAKE_CASE_ , help='''like cnn_dm/test.source''' ) parser.add_argument('''save_path''' , type=SCREAMING_SNAKE_CASE_ , help='''where to save summaries''' ) parser.add_argument('''--reference_path''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='''like cnn_dm/test.target''' ) parser.add_argument('''--score_path''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , default='''metrics.json''' , help='''where to save metrics''' ) parser.add_argument('''--device''' , type=SCREAMING_SNAKE_CASE_ , 
    required=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help='''cuda, cuda:1, cpu etc.''' )
    parser.add_argument(
        '''--prefix''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help='''will be added to the beginning of src examples''' )
    parser.add_argument('''--task''' , type=SCREAMING_SNAKE_CASE_ , default='''summarization''' , help='''used for task_specific_params + metrics''' )
    parser.add_argument('''--bs''' , type=SCREAMING_SNAKE_CASE_ , default=8 , required=SCREAMING_SNAKE_CASE_ , help='''batch size''' )
    parser.add_argument(
        '''--n_obs''' , type=SCREAMING_SNAKE_CASE_ , default=-1 , required=SCREAMING_SNAKE_CASE_ , help='''How many observations. Defaults to all.''' )
    parser.add_argument('''--fp16''' , action='''store_true''' )
    parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
    parser.add_argument(
        '''--info''' , nargs='''?''' , type=SCREAMING_SNAKE_CASE_ , const=datetime_now() , help=(
            '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
            ''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
        ) , )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    UpperCamelCase , UpperCamelCase : int = parser.parse_known_args()
    UpperCamelCase : str = parse_numeric_n_bool_cl_kwargs(SCREAMING_SNAKE_CASE_ )
    if parsed_args and verbose:
        print(F"""parsed the following generate kwargs: {parsed_args}""" )
    UpperCamelCase : str = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        UpperCamelCase : Tuple = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
    if args.reference_path is None and Path(args.score_path ).exists():
        warnings.warn(F"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
    UpperCamelCase : str = generate_summaries_or_translations(
        SCREAMING_SNAKE_CASE_ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fp16 , task=args.task , prefix=args.prefix , **SCREAMING_SNAKE_CASE_ , )
    if args.reference_path is None:
        return {}

    # Compute scores
    UpperCamelCase : Tuple = calculate_bleu if '''translation''' in args.task else calculate_rouge
    UpperCamelCase : Dict = [x.rstrip() for x in open(args.save_path ).readlines()]
    UpperCamelCase : Optional[int] = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(SCREAMING_SNAKE_CASE_ )]
    UpperCamelCase : dict = score_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    scores.update(SCREAMING_SNAKE_CASE_ )
    if args.dump_args:
        scores.update(SCREAMING_SNAKE_CASE_ )
    if args.info:
        UpperCamelCase : Optional[Any] = args.info
    if verbose:
        print(SCREAMING_SNAKE_CASE_ )
    if args.score_path is not None:
        json.dump(SCREAMING_SNAKE_CASE_ , open(args.score_path , '''w''' ) )
    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
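For orientation, a minimal sketch of driving this script end to end; the data paths are hypothetical placeholders, and `run_generate` is the entry-point name kept by the `__main__` block above:

import sys

# argparse reads sys.argv, so this mirrors the CLI call:
# python run_eval.py facebook/bart-large-cnn cnn_dm/test.source preds.txt ...
sys.argv = [
    "run_eval.py",
    "facebook/bart-large-cnn",                 # model_name
    "cnn_dm/test.source",                      # input_path (placeholder)
    "preds.txt",                               # save_path (placeholder)
    "--reference_path", "cnn_dm/test.target",  # placeholder
    "--score_path", "rouge.json",
    "--bs", "4",
]
run_generate(verbose=True)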
315
1
from __future__ import annotations


def a ( SCREAMING_SNAKE_CASE_ : tuple[int, int] , SCREAMING_SNAKE_CASE_ : int ):
    """simple docstring"""
    UpperCamelCase , UpperCamelCase : Union[str, Any] = position
    UpperCamelCase : List[Any] = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    UpperCamelCase : Union[str, Any] = []

    for position in positions:
        UpperCamelCase , UpperCamelCase : List[Any] = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(SCREAMING_SNAKE_CASE_ )

    return permissible_positions


def a ( SCREAMING_SNAKE_CASE_ : list[list[int]] ):
    """simple docstring"""
    return not any(elem == 0 for row in board for elem in row )


def a ( SCREAMING_SNAKE_CASE_ : list[list[int]] , SCREAMING_SNAKE_CASE_ : tuple[int, int] , SCREAMING_SNAKE_CASE_ : int ):
    """simple docstring"""
    if is_complete(SCREAMING_SNAKE_CASE_ ):
        return True

    for position in get_valid_pos(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ):
        UpperCamelCase , UpperCamelCase : Tuple = position

        if board[y][x] == 0:
            UpperCamelCase : Optional[int] = curr + 1
            if open_knight_tour_helper(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , curr + 1 ):
                return True
            UpperCamelCase : Dict = 0

    return False


def a ( SCREAMING_SNAKE_CASE_ : int ):
    """simple docstring"""
    UpperCamelCase : int = [[0 for i in range(SCREAMING_SNAKE_CASE_ )] for j in range(SCREAMING_SNAKE_CASE_ )]

    for i in range(SCREAMING_SNAKE_CASE_ ):
        for j in range(SCREAMING_SNAKE_CASE_ ):
            UpperCamelCase : str = 1
            if open_knight_tour_helper(SCREAMING_SNAKE_CASE_ , (i, j) , 1 ):
                return board
            UpperCamelCase : Union[str, Any] = 0

    UpperCamelCase : str = F"""Open Knight Tour cannot be performed on a board of size {n}"""
    raise ValueError(SCREAMING_SNAKE_CASE_ )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
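A quick usage sketch; the defining names are anonymized above, so this assumes the last function keeps its original name `open_knight_tour` (5 is the smallest board size above 1 that admits an open tour):

board = open_knight_tour(5)  # assumed original name of the last function above
for row in board:
    print(row)  # each square holds its 1-based visit order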
315
import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : int = ["image_processor", "tokenizer"] __UpperCamelCase : List[str] = "AutoImageProcessor" __UpperCamelCase : Optional[Any] = "AutoTokenizer" def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[int] = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __SCREAMING_SNAKE_CASE , ) UpperCamelCase : Any = kwargs.pop('''feature_extractor''' ) UpperCamelCase : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = self.image_processor UpperCamelCase : int = False def __call__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = kwargs.pop('''images''' , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = kwargs.pop('''text''' , __SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) > 0: UpperCamelCase : Union[str, Any] = args[0] UpperCamelCase : str = args[1:] if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: UpperCamelCase : List[str] = self.image_processor(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if text is not None: UpperCamelCase : Optional[Any] = self.tokenizer(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if text is None: return inputs elif images is None: return encodings else: UpperCamelCase : List[str] = encodings['''input_ids'''] return inputs def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): """simple docstring""" return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): """simple docstring""" return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) @contextmanager def _lowercase ( self ): """simple docstring""" warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your images inputs, or in a separate call.''' ) UpperCamelCase : Any = True UpperCamelCase : int = self.tokenizer yield UpperCamelCase : List[Any] = self.image_processor UpperCamelCase : Tuple = False def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None ): """simple docstring""" if added_vocab is None: UpperCamelCase : str = self.tokenizer.get_added_vocab() UpperCamelCase : int = {} while tokens: UpperCamelCase : Dict = re.search(R'''<s_(.*?)>''' , __SCREAMING_SNAKE_CASE , re.IGNORECASE ) if start_token is None: break UpperCamelCase : List[str] = start_token.group(1 ) UpperCamelCase : Dict = re.search(Rf"""</s_{key}>""" , __SCREAMING_SNAKE_CASE , re.IGNORECASE ) UpperCamelCase : Any = start_token.group() if end_token is None: UpperCamelCase : Optional[int] = tokens.replace(__SCREAMING_SNAKE_CASE , '''''' ) else: UpperCamelCase : Dict = end_token.group() UpperCamelCase : int = re.escape(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = re.escape(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , __SCREAMING_SNAKE_CASE , re.IGNORECASE ) if content is not None: UpperCamelCase : Dict = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node UpperCamelCase : Tuple = self.tokenajson(__SCREAMING_SNAKE_CASE , is_inner_value=__SCREAMING_SNAKE_CASE , added_vocab=__SCREAMING_SNAKE_CASE ) if value: if len(__SCREAMING_SNAKE_CASE ) == 1: UpperCamelCase : str = value[0] UpperCamelCase : str = value else: # leaf nodes UpperCamelCase : Optional[int] = [] for leaf in content.split(R'''<sep/>''' ): UpperCamelCase : Optional[int] = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": UpperCamelCase : int = leaf[1:-2] # for categorical special tokens output[key].append(__SCREAMING_SNAKE_CASE ) if len(output[key] ) == 1: UpperCamelCase : Tuple = output[key][0] UpperCamelCase : List[Any] = tokens[tokens.find(__SCREAMING_SNAKE_CASE ) + len(__SCREAMING_SNAKE_CASE ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=__SCREAMING_SNAKE_CASE , added_vocab=__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def _lowercase ( self ): """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __SCREAMING_SNAKE_CASE , ) return self.image_processor_class @property def _lowercase ( self ): """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __SCREAMING_SNAKE_CASE , ) return self.image_processor
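To make the tag grammar concrete, a hedged sketch of the recursive parser near the end of the class; method names are anonymized in this dump, and `token2json` is assumed to be its original name. It also assumes `processor` is an instance built as in `__init__` above:

sequence = "<s_menu><s_name>latte</s_name></s_menu>"
# under the parsing logic above this yields a nested dict:
print(processor.token2json(sequence))  # -> {"menu": {"name": "latte"}}
# a <sep/> between repeated groups would produce a list of dicts instead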
315
1
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def a ( ): """simple docstring""" UpperCamelCase : str = '''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg''' UpperCamelCase : List[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ).convert('''RGB''' ) return image def a ( SCREAMING_SNAKE_CASE_ : Tuple ): """simple docstring""" UpperCamelCase : Dict = [] # fmt: off # vision encoder rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') ) rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') ) rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') ) rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") ) # QFormer rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') ) rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', 
'''qformer.embeddings.layernorm.bias''') ) # fmt: on return rename_keys def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] ): """simple docstring""" UpperCamelCase : Dict = dct.pop(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = val def a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int ): """simple docstring""" for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases UpperCamelCase : Dict = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" ) UpperCamelCase : Dict = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" ) # next, set bias in the state dict UpperCamelCase : int = torch.cat((q_bias, torch.zeros_like(SCREAMING_SNAKE_CASE_ , requires_grad=SCREAMING_SNAKE_CASE_ ), v_bias) ) UpperCamelCase : Any = qkv_bias def a ( SCREAMING_SNAKE_CASE_ : Optional[Any] ): """simple docstring""" UpperCamelCase : Optional[Any] = 3_6_4 if '''coco''' in model_name else 2_2_4 UpperCamelCase : Dict = InstructBlipVisionConfig(image_size=SCREAMING_SNAKE_CASE_ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: UpperCamelCase : List[Any] = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: UpperCamelCase : Union[str, Any] = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: UpperCamelCase : List[Any] = LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''' , vocab_size=3_2_0_0_1 ).to_dict() elif "vicuna-13b" in model_name: UpperCamelCase : Tuple = LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''' , vocab_size=3_2_0_0_1 ).to_dict() else: raise ValueError('''Model name not supported''' ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 UpperCamelCase : Dict = InstructBlipQFormerConfig(vocab_size=3_0_5_2_3 ).to_dict() UpperCamelCase : Tuple = InstructBlipConfig(vision_config=SCREAMING_SNAKE_CASE_ , text_config=SCREAMING_SNAKE_CASE_ , qformer_config=SCREAMING_SNAKE_CASE_ ) return config, image_size @torch.no_grad() def a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : Tuple=False ): """simple docstring""" UpperCamelCase : List[str] = AutoTokenizer.from_pretrained('''bert-base-uncased''' , truncation_side='''left''' ) qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} ) if "t5" in model_name: UpperCamelCase : Optional[Any] = TaTokenizerFast.from_pretrained('''google/flan-t5-xl''' , truncation_side='''left''' ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) UpperCamelCase : Optional[Any] = LlamaTokenizerFast.from_pretrained( '''huggyllama/llama-7b''' , truncation_side='''left''' , bos_token='''</s>''' , unk_token='''</s>''' ) tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} ) UpperCamelCase , UpperCamelCase : Dict = get_blipa_config(SCREAMING_SNAKE_CASE_ ) 
UpperCamelCase : Tuple = InstructBlipForConditionalGeneration(SCREAMING_SNAKE_CASE_ ).eval() UpperCamelCase : Optional[Any] = { '''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''), '''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''), '''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''), '''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''), } UpperCamelCase , UpperCamelCase : Union[str, Any] = model_name_to_original[model_name] # load original model print('''Loading original model...''' ) UpperCamelCase : List[str] = '''cuda:1''' if torch.cuda.is_available() else '''cpu''' UpperCamelCase : List[Any] = '''cuda:2''' if torch.cuda.is_available() else '''cpu''' UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[int] = load_model_and_preprocess( name=SCREAMING_SNAKE_CASE_ , model_type=SCREAMING_SNAKE_CASE_ , is_eval=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ ) original_model.eval() print('''Done!''' ) # update state dict keys UpperCamelCase : List[Any] = original_model.state_dict() UpperCamelCase : Optional[Any] = create_rename_keys(SCREAMING_SNAKE_CASE_ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): UpperCamelCase : Dict = state_dict.pop(SCREAMING_SNAKE_CASE_ ) if key.startswith('''Qformer.bert''' ): UpperCamelCase : Dict = key.replace('''Qformer.bert''' , '''qformer''' ) if "attention.self" in key: UpperCamelCase : Dict = key.replace('''self''' , '''attention''' ) if "llm_proj" in key: UpperCamelCase : Optional[int] = key.replace('''llm_proj''' , '''language_projection''' ) if "t5_proj" in key: UpperCamelCase : Optional[int] = key.replace('''t5_proj''' , '''language_projection''' ) if key.startswith('''llm_model''' ): UpperCamelCase : Optional[Any] = key.replace('''llm_model''' , '''language_model''' ) if key.startswith('''t5''' ): UpperCamelCase : str = key.replace('''t5''' , '''language''' ) UpperCamelCase : str = val # read in qv biases read_in_q_v_bias(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = load_demo_image() UpperCamelCase : str = '''What is unusual about this image?''' # create processor UpperCamelCase : Optional[Any] = BlipImageProcessor( size={'''height''': image_size, '''width''': image_size} , image_mean=SCREAMING_SNAKE_CASE_ , image_std=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = InstructBlipProcessor( image_processor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase : Union[str, Any] = processor(images=SCREAMING_SNAKE_CASE_ , text=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ ) # make sure processor creates exact same pixel values UpperCamelCase : Optional[int] = vis_processors['''eval'''](SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[Any] = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , SCREAMING_SNAKE_CASE_ ) original_model.to(SCREAMING_SNAKE_CASE_ ) hf_model.to(SCREAMING_SNAKE_CASE_ ) with torch.no_grad(): if "vicuna" in model_name: UpperCamelCase : List[str] = original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits UpperCamelCase : Dict = 
hf_model(**SCREAMING_SNAKE_CASE_ ).logits else: UpperCamelCase : Optional[Any] = original_model( {'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits UpperCamelCase : List[str] = tokenizer('''\n''' , return_tensors='''pt''' ).input_ids.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_0_0 ) UpperCamelCase : Tuple = hf_model(**SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ).logits print('''First values of original logits:''' , original_logits[0, :3, :3] ) print('''First values of HF logits:''' , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape UpperCamelCase : Optional[int] = 1E-4 if '''vicuna''' in model_name else 1E-5 assert torch.allclose(original_logits.to(logits.device ) , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) print('''Looks ok!''' ) print('''Generating with original model...''' ) UpperCamelCase : Tuple = original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print('''Generating with HF model...''' ) UpperCamelCase : Optional[Any] = hf_model.generate( **SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , num_beams=5 , max_length=2_5_6 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? UpperCamelCase : Union[str, Any] = 2 print('''Original generation:''' , SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = processor.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = [text.strip() for text in output_text] print('''HF generation:''' , SCREAMING_SNAKE_CASE_ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: processor.push_to_hub(F"""Salesforce/{model_name}""" ) hf_model.push_to_hub(F"""Salesforce/{model_name}""" ) if __name__ == "__main__": __UpperCAmelCase : List[str] = argparse.ArgumentParser() __UpperCAmelCase : Tuple = [ "instructblip-vicuna-7b", "instructblip-vicuna-13b", "instructblip-flan-t5-xl", "instructblip-flan-t5-xxl", ] parser.add_argument( "--model_name", default="instructblip-flan-t5-xl", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) __UpperCAmelCase : str = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
315
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase : Union[str, Any] = { "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"], "processing_mgp_str": ["MgpstrProcessor"], "tokenization_mgp_str": ["MgpstrTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : Union[str, Any] = [ "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST", "MgpstrModel", "MgpstrPreTrainedModel", "MgpstrForSceneTextRecognition", ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys __UpperCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
315
1
import re
import time
from typing import Optional

import IPython.display as disp

from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length


def a ( SCREAMING_SNAKE_CASE_ : Optional[Any] ):
    """simple docstring"""
    UpperCamelCase : Union[str, Any] = int(SCREAMING_SNAKE_CASE_ )
    UpperCamelCase , UpperCamelCase , UpperCamelCase : Union[str, Any] = t // 3_6_0_0, (t // 6_0) % 6_0, t % 6_0
    return F"""{h}:{m:02d}:{s:02d}""" if h != 0 else F"""{m:02d}:{s:02d}"""


def a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any=3_0_0 ):
    """simple docstring"""
    return F"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


def a ( SCREAMING_SNAKE_CASE_ : List[Any] ):
    """simple docstring"""
    UpperCamelCase : Dict = '''<table border="1" class="dataframe">\n'''
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += F"""      <th>{i}</th>\n"""
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            UpperCamelCase : Tuple = F"""{elt:.6f}""" if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else str(SCREAMING_SNAKE_CASE_ )
            html_code += F"""      <td>{elt}</td>\n"""
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code


class UpperCAmelCase_ :
    '''simple docstring'''

    __UpperCamelCase : str = 5
    __UpperCamelCase : int = 0.2

    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 300 , ):
        """simple docstring"""
        UpperCamelCase : List[Any] = total
        UpperCamelCase : str = '''''' if prefix is None else prefix
        UpperCamelCase : int = leave
        UpperCamelCase : int = parent
        UpperCamelCase : List[Any] = width
        UpperCamelCase : Any = None
        UpperCamelCase : Any = None
        UpperCamelCase : Optional[int] = None

    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None ):
        """simple docstring"""
        UpperCamelCase : Dict = value
        if comment is not None:
            UpperCamelCase : Union[str, Any] = comment
        if self.last_value is None:
            UpperCamelCase : Any = time.time()
            UpperCamelCase : Union[str, Any] = value
            UpperCamelCase : List[str] = None
            UpperCamelCase : Dict = self.warmup
            UpperCamelCase : Union[str, Any] = 1
            self.update_bar(__SCREAMING_SNAKE_CASE )
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
            if self.first_calls > 0:
                self.first_calls -= 1
            UpperCamelCase : Any = time.time()
            UpperCamelCase : List[str] = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value: UpperCamelCase : str = self.elapsed_time / (value - self.start_value) else: UpperCamelCase : Tuple = None if value >= self.total: UpperCamelCase : Dict = self.total UpperCamelCase : Any = None if not self.leave: self.close() elif self.average_time_per_item is not None: UpperCamelCase : List[str] = self.average_time_per_item * (self.total - value) self.update_bar(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = value UpperCamelCase : Any = current_time if self.average_time_per_item is None: UpperCamelCase : str = 1 else: UpperCamelCase : List[Any] = max(int(self.update_every / self.average_time_per_item ) , 1 ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ): """simple docstring""" UpperCamelCase : Dict = ''' ''' * (len(str(self.total ) ) - len(str(__SCREAMING_SNAKE_CASE ) )) + str(__SCREAMING_SNAKE_CASE ) if self.elapsed_time is None: UpperCamelCase : Any = f"""[{spaced_value}/{self.total} : < :""" elif self.predicted_remaining is None: UpperCamelCase : int = f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}""" else: UpperCamelCase : List[Any] = ( f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <""" f""" {format_time(self.predicted_remaining )}""" ) self.label += f""", {1/self.average_time_per_item:.2f} it/s""" self.label += "]" if self.comment is None or len(self.comment ) == 0 else f""", {self.comment}]""" self.display() def _lowercase ( self ): """simple docstring""" UpperCamelCase : int = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: UpperCamelCase : List[str] = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE ) else: self.output.update(disp.HTML(self.html_code ) ) def _lowercase ( self ): """simple docstring""" if self.parent is None and self.output is not None: self.output.update(disp.HTML('''''' ) ) class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ): """simple docstring""" super().__init__(__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = None if column_names is None else [column_names] UpperCamelCase : Dict = None def _lowercase ( self ): """simple docstring""" UpperCamelCase : Tuple = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: UpperCamelCase : str = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE ) else: self.output.update(disp.HTML(self.html_code ) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if self.inner_table is None: UpperCamelCase : List[str] = [list(values.keys() ), list(values.values() )] else: UpperCamelCase : Any = self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = columns self.inner_table.append([values[c] for c in columns] ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=300 ): """simple docstring""" UpperCamelCase : str = 
        NotebookProgressBar(__SCREAMING_SNAKE_CASE , prefix=__SCREAMING_SNAKE_CASE , parent=self , width=__SCREAMING_SNAKE_CASE )
        return self.child_bar

    def _lowercase ( self ):
        """simple docstring"""
        UpperCamelCase : int = None
        self.display()


class UpperCAmelCase_ ( _a):
    '''simple docstring'''

    def __init__( self ):
        """simple docstring"""
        UpperCamelCase : Dict = None
        UpperCamelCase : int = None
        UpperCamelCase : Tuple = False

    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
        """simple docstring"""
        UpperCamelCase : Any = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
        UpperCamelCase : List[str] = 0
        UpperCamelCase : Optional[Any] = 0
        UpperCamelCase : Tuple = [self.first_column] + ['''Training Loss''']
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append('''Validation Loss''' )
        UpperCamelCase : Dict = NotebookTrainingTracker(state.max_steps , __SCREAMING_SNAKE_CASE )

    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
        """simple docstring"""
        UpperCamelCase : int = int(state.epoch ) if int(state.epoch ) == state.epoch else f"""{state.epoch:.2f}"""
        self.training_tracker.update(
            state.global_step + 1 , comment=f"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , )
        UpperCamelCase : str = False

    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ):
        """simple docstring"""
        if not has_length(__SCREAMING_SNAKE_CASE ):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                UpperCamelCase : str = self.training_tracker.add_child(len(__SCREAMING_SNAKE_CASE ) )
            else:
                UpperCamelCase : Any = NotebookProgressBar(len(__SCREAMING_SNAKE_CASE ) )
            self.prediction_bar.update(1 )
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1 )

    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
        """simple docstring"""
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        UpperCamelCase : Optional[int] = None

    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ):
        """simple docstring"""
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            UpperCamelCase : str = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
            UpperCamelCase : Any = state.global_step
            self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )

    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ):
        """simple docstring"""
        if self.training_tracker is not None:
            UpperCamelCase : str = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
            for log in reversed(state.log_history ):
                if "loss" in log:
                    UpperCamelCase : Union[str, Any] = log['''loss''']
                    break
            if self.first_column == "Epoch":
                UpperCamelCase : int = int(state.epoch )
            else:
                UpperCamelCase : Tuple = state.global_step
            UpperCamelCase : Any = '''eval'''
            for k in metrics:
                if k.endswith('''_loss''' ):
                    UpperCamelCase : int = re.sub(R'''\_loss$''' , '''''' , __SCREAMING_SNAKE_CASE )
            UpperCamelCase : Dict = metrics.pop('''total_flos''' , __SCREAMING_SNAKE_CASE )
            UpperCamelCase : int =
metrics.pop('''epoch''' , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = metrics.pop(f"""{metric_key_prefix}_runtime""" , __SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = metrics.pop(f"""{metric_key_prefix}_samples_per_second""" , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = metrics.pop(f"""{metric_key_prefix}_steps_per_second""" , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = metrics.pop(f"""{metric_key_prefix}_jit_compilation_time""" , __SCREAMING_SNAKE_CASE ) for k, v in metrics.items(): if k == f"""{metric_key_prefix}_loss""": UpperCamelCase : List[str] = v else: UpperCamelCase : Dict = k.split('''_''' ) UpperCamelCase : Union[str, Any] = ''' '''.join([part.capitalize() for part in splits[1:]] ) UpperCamelCase : Optional[Any] = v self.training_tracker.write_line(__SCREAMING_SNAKE_CASE ) self.training_tracker.remove_child() UpperCamelCase : Optional[int] = None # Evaluation takes a long time so we should force the next update. UpperCamelCase : Optional[Any] = True def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): """simple docstring""" self.training_tracker.update( state.global_step , comment=f"""Epoch {int(state.epoch )}/{state.num_train_epochs}""" , force_update=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = None
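As a quick sanity check on the time formatter at the top of this file (named `format_time` in the original source, anonymized above):

print(format_time(75))    # "01:15"   -> mm:ss when the duration is under an hour
print(format_time(3675))  # "1:01:15" -> h:mm:ss otherwise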
315
def a ( SCREAMING_SNAKE_CASE_ : int = 5_0 ): """simple docstring""" UpperCamelCase : List[str] = [1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(f'''{solution() = }''')
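This counts the ways to fill a row with blocks of length at least three separated by gaps, in the style of Project Euler problem 114. Assuming the function keeps its original name `solution`, a few small checks:

print(solution(3))  # 2: leave the row empty, or place one block of length 3
print(solution(4))  # 4
print(solution(7))  # 17, matching the worked example in the problem statement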
315
1
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL __UpperCAmelCase : Dict = logging.get_logger(__name__) def a ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Union[int, Iterable[int]] , SCREAMING_SNAKE_CASE_ : bool , SCREAMING_SNAKE_CASE_ : int ): """simple docstring""" def constraint_to_multiple_of(SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict=0 , SCREAMING_SNAKE_CASE_ : Optional[int]=None ): UpperCamelCase : Union[str, Any] = round(val / multiple ) * multiple if max_val is not None and x > max_val: UpperCamelCase : Union[str, Any] = math.floor(val / multiple ) * multiple if x < min_val: UpperCamelCase : Union[str, Any] = math.ceil(val / multiple ) * multiple return x UpperCamelCase : Optional[int] = (output_size, output_size) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else output_size UpperCamelCase , UpperCamelCase : Optional[int] = get_image_size(SCREAMING_SNAKE_CASE_ ) UpperCamelCase , UpperCamelCase : int = output_size # determine new height and width UpperCamelCase : str = output_height / input_height UpperCamelCase : Union[str, Any] = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width UpperCamelCase : Union[str, Any] = scale_width else: # fit height UpperCamelCase : str = scale_height UpperCamelCase : int = constraint_to_multiple_of(scale_height * input_height , multiple=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = constraint_to_multiple_of(scale_width * input_width , multiple=SCREAMING_SNAKE_CASE_ ) return (new_height, new_width) class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : List[str] = ["pixel_values"] def __init__( self , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = 1 / 255 , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = size if size is not None else {'''height''': 384, '''width''': 384} UpperCamelCase : Union[str, Any] = get_size_dict(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = do_resize UpperCamelCase : Optional[int] = size UpperCamelCase : int = keep_aspect_ratio UpperCamelCase : Union[str, Any] = ensure_multiple_of UpperCamelCase : Dict = resample UpperCamelCase : Optional[Any] = do_rescale UpperCamelCase : Optional[Any] = rescale_factor UpperCamelCase : List[str] = do_normalize UpperCamelCase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowercase ( self , __SCREAMING_SNAKE_CASE , 
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" UpperCamelCase : Union[str, Any] = get_size_dict(__SCREAMING_SNAKE_CASE ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" ) UpperCamelCase : List[str] = get_resize_output_image_size( __SCREAMING_SNAKE_CASE , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=__SCREAMING_SNAKE_CASE , multiple=__SCREAMING_SNAKE_CASE , ) return resize(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" return rescale(__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" UpperCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize UpperCamelCase : Optional[int] = size if size is not None else self.size UpperCamelCase : Optional[Any] = get_size_dict(__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio UpperCamelCase : Tuple = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of UpperCamelCase : Optional[int] = resample if resample is not None else self.resample UpperCamelCase : Tuple = do_rescale if do_rescale is not None else self.do_rescale UpperCamelCase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCamelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize UpperCamelCase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean UpperCamelCase : Union[str, Any] = image_std if image_std is not None else self.image_std UpperCamelCase : str = make_list_of_images(__SCREAMING_SNAKE_CASE ) if not valid_images(__SCREAMING_SNAKE_CASE ): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )

        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )

        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )

        # All transformations expect numpy arrays.
        UpperCamelCase : List[Any] = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images]

        if do_resize:
            UpperCamelCase : List[str] = [self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE ) for image in images]

        if do_rescale:
            UpperCamelCase : Optional[Any] = [self.rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE ) for image in images]

        if do_normalize:
            UpperCamelCase : Dict = [self.normalize(image=__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE ) for image in images]

        UpperCamelCase : Union[str, Any] = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images]

        UpperCamelCase : str = {'''pixel_values''': images}
        return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )

    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
        """simple docstring"""
        UpperCamelCase : Any = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )

            if is_torch_tensor(__SCREAMING_SNAKE_CASE ):
                UpperCamelCase : Dict = target_sizes.numpy()

            UpperCamelCase : Union[str, Any] = []

            for idx in range(len(__SCREAMING_SNAKE_CASE ) ):
                UpperCamelCase : int = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE )
                UpperCamelCase : Optional[Any] = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(__SCREAMING_SNAKE_CASE )
        else:
            UpperCamelCase : str = logits.argmax(dim=1 )
            UpperCamelCase : Optional[int] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]

        return semantic_segmentation
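A short usage sketch; the class name is anonymized above, and `DPTImageProcessor` is assumed to be its original name (the image path is a hypothetical placeholder):

from PIL import Image

processor = DPTImageProcessor()  # assumed original name of the class above
image = Image.open("example.jpg")  # hypothetical local file
inputs = processor.preprocess(image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # e.g. torch.Size([1, 3, 384, 384]) with the defaults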
315
import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , *__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ): """simple docstring""" super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = eval_examples UpperCamelCase : Optional[Any] = post_process_function def _lowercase ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "eval" ): """simple docstring""" UpperCamelCase : int = self.eval_dataset if eval_dataset is None else eval_dataset UpperCamelCase : int = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. UpperCamelCase : Any = self.compute_metrics UpperCamelCase : List[Any] = None UpperCamelCase : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop UpperCamelCase : Dict = time.time() try: UpperCamelCase : str = eval_loop( __SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , ) finally: UpperCamelCase : Union[str, Any] = compute_metrics UpperCamelCase : Any = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default UpperCamelCase : Dict = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions ) UpperCamelCase : Optional[Any] = self.compute_metrics(__SCREAMING_SNAKE_CASE ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): UpperCamelCase : Dict = metrics.pop(__SCREAMING_SNAKE_CASE ) metrics.update(output.metrics ) else: UpperCamelCase : List[Any] = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(__SCREAMING_SNAKE_CASE ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) UpperCamelCase : Any = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE ) return metrics def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "test" ): """simple docstring""" UpperCamelCase : Tuple = self.get_test_dataloader(__SCREAMING_SNAKE_CASE ) # Temporarily disable metric computation, we will do it in the loop here. 
UpperCamelCase : Union[str, Any] = self.compute_metrics UpperCamelCase : Tuple = None UpperCamelCase : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop UpperCamelCase : Optional[int] = time.time() try: UpperCamelCase : int = eval_loop( __SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , ) finally: UpperCamelCase : int = compute_metrics UpperCamelCase : Dict = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output UpperCamelCase : Dict = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions , '''predict''' ) UpperCamelCase : Union[str, Any] = self.compute_metrics(__SCREAMING_SNAKE_CASE ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): UpperCamelCase : Any = metrics.pop(__SCREAMING_SNAKE_CASE ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE )
315
1
import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=4 , ): """simple docstring""" UpperCamelCase : List[str] = parent UpperCamelCase : int = batch_size UpperCamelCase : Dict = seq_length UpperCamelCase : str = is_training UpperCamelCase : Union[str, Any] = use_attention_mask UpperCamelCase : Dict = use_token_type_ids UpperCamelCase : Optional[int] = use_labels UpperCamelCase : Dict = vocab_size UpperCamelCase : Dict = hidden_size UpperCamelCase : Optional[int] = num_hidden_layers UpperCamelCase : List[Any] = num_attention_heads UpperCamelCase : Any = intermediate_size UpperCamelCase : Optional[int] = hidden_act UpperCamelCase : Tuple = hidden_dropout_prob UpperCamelCase : Optional[Any] = attention_probs_dropout_prob UpperCamelCase : Dict = max_position_embeddings UpperCamelCase : Optional[int] = type_vocab_size UpperCamelCase : str = type_sequence_label_size UpperCamelCase : str = initializer_range UpperCamelCase : Dict = num_choices def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase : Dict = None if self.use_attention_mask: UpperCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase : Tuple = None if self.use_token_type_ids: UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase : List[str] = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = config_and_inputs UpperCamelCase : Optional[Any] = 
{'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = config_and_inputs UpperCamelCase : Union[str, Any] = True UpperCamelCase : Any = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class UpperCAmelCase_ ( _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : Optional[int] = True __UpperCamelCase : Optional[Any] = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Tuple = FlaxRobertaPreLayerNormModelTester(self ) @slow def _lowercase ( self ): """simple docstring""" for model_class_name in self.all_model_classes: UpperCamelCase : int = model_class_name.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @require_flax class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa ) UpperCamelCase : Tuple = model(__SCREAMING_SNAKE_CASE )[0] UpperCamelCase : int = [1, 11, 50_265] self.assertEqual(list(output.shape ) , __SCREAMING_SNAKE_CASE ) # compare the actual values for a slice. UpperCamelCase : List[str] = np.array( [[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = FlaxRobertaPreLayerNormModel.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa ) UpperCamelCase : int = model(__SCREAMING_SNAKE_CASE )[0] # compare the actual values for a slice. UpperCamelCase : str = np.array( [[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
315
from __future__ import annotations import collections import pprint from pathlib import Path def a ( SCREAMING_SNAKE_CASE_ : str ): """simple docstring""" return "".join(sorted(SCREAMING_SNAKE_CASE_ ) ) def a ( SCREAMING_SNAKE_CASE_ : str ): """simple docstring""" return word_by_signature[signature(SCREAMING_SNAKE_CASE_ )] __UpperCAmelCase : str = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8") __UpperCAmelCase : Tuple = sorted({word.strip().lower() for word in data.splitlines()}) __UpperCAmelCase : Union[str, Any] = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": __UpperCAmelCase : int = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open("anagrams.txt", "w") as file: file.write("all_anagrams = \n ") file.write(pprint.pformat(all_anagrams))
315
1
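# Note on the anagram grouper in the record above: it keys every word by its sorted
# letters, so all anagrams share one dictionary bucket. A minimal self-contained
# sketch of the same technique (the word list here is illustrative, not from the record):
import collections

def signature(word: str) -> str:
    # two words are anagrams exactly when their sorted letters match
    return "".join(sorted(word))

groups: dict[str, list[str]] = collections.defaultdict(list)
for w in ["listen", "silent", "enlist", "google"]:
    groups[signature(w)].append(w)

print(groups[signature("listen")])  # ['listen', 'silent', 'enlist']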
from argparse import ArgumentParser from .env import EnvironmentCommand def a ( ): """simple docstring""" UpperCamelCase : Any = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' ) UpperCamelCase : Tuple = parser.add_subparsers(help='''diffusers-cli command helpers''' ) # Register commands EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE_ ) # Let's go UpperCamelCase : List[Any] = parser.parse_args() if not hasattr(SCREAMING_SNAKE_CASE_ , '''func''' ): parser.print_help() exit(1 ) # Run UpperCamelCase : str = args.func(SCREAMING_SNAKE_CASE_ ) service.run() if __name__ == "__main__": main()
315
def a ( SCREAMING_SNAKE_CASE_ : list[list[float]] ): """simple docstring""" UpperCamelCase : list[list[float]] = [] for data in source_data: for i, el in enumerate(SCREAMING_SNAKE_CASE_ ): if len(SCREAMING_SNAKE_CASE_ ) < i + 1: data_lists.append([] ) data_lists[i].append(float(SCREAMING_SNAKE_CASE_ ) ) return data_lists def a ( SCREAMING_SNAKE_CASE_ : list[list[float]] , SCREAMING_SNAKE_CASE_ : list[int] ): """simple docstring""" UpperCamelCase : list[list[float]] = [] for dlist, weight in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Tuple = max(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : list[float] = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: UpperCamelCase : Dict = F"""Invalid weight of {weight:f} provided""" raise ValueError(SCREAMING_SNAKE_CASE_ ) score_lists.append(SCREAMING_SNAKE_CASE_ ) return score_lists def a ( SCREAMING_SNAKE_CASE_ : list[list[float]] ): """simple docstring""" UpperCamelCase : list[float] = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(SCREAMING_SNAKE_CASE_ ): UpperCamelCase : str = final_scores[j] + ele return final_scores def a ( SCREAMING_SNAKE_CASE_ : list[list[float]] , SCREAMING_SNAKE_CASE_ : list[int] ): """simple docstring""" UpperCamelCase : str = get_data(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = calculate_each_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = generate_final_scores(SCREAMING_SNAKE_CASE_ ) # append scores to source data for i, ele in enumerate(SCREAMING_SNAKE_CASE_ ): source_data[i].append(SCREAMING_SNAKE_CASE_ ) return source_data
315
1
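# Note on the scoring record above: each data column is min-max normalised, and a
# column weight of 0 marks a "cost" criterion whose normalised score is inverted
# (1 - score). Hedged sketch of just that normalisation step (the function name and
# constant-column handling are illustrative, not from the record):
def normalise(column: list[float], benefit: bool) -> list[float]:
    lo, hi = min(column), max(column)
    if hi == lo:  # constant column: sidestep the ZeroDivisionError the record handles
        return [0.0 for _ in column]
    scaled = [(x - lo) / (hi - lo) for x in column]
    return scaled if benefit else [1.0 - s for s in scaled]

print(normalise([10.0, 20.0, 30.0], benefit=True))   # [0.0, 0.5, 1.0]
print(normalise([10.0, 20.0, 30.0], benefit=False))  # [1.0, 0.5, 0.0]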
from __future__ import annotations from collections.abc import Sequence from typing import Literal def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ): """simple docstring""" UpperCamelCase : List[Any] = list(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Tuple = list(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = 0 for i in range(len(SCREAMING_SNAKE_CASE_ ) ): if lista[i] != lista[i]: count += 1 UpperCamelCase : str = '''_''' if count > 1: return False else: return "".join(SCREAMING_SNAKE_CASE_ ) def a ( SCREAMING_SNAKE_CASE_ : list[str] ): """simple docstring""" UpperCamelCase : Tuple = [] while True: UpperCamelCase : Optional[int] = ['''$'''] * len(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Tuple = [] for i in range(len(SCREAMING_SNAKE_CASE_ ) ): for j in range(i + 1 , len(SCREAMING_SNAKE_CASE_ ) ): UpperCamelCase : Optional[int] = compare_string(binary[i] , binary[j] ) if k is False: UpperCamelCase : Union[str, Any] = '''*''' UpperCamelCase : Optional[int] = '''*''' temp.append('''X''' ) for i in range(len(SCREAMING_SNAKE_CASE_ ) ): if checka[i] == "$": pi.append(binary[i] ) if len(SCREAMING_SNAKE_CASE_ ) == 0: return pi UpperCamelCase : int = list(set(SCREAMING_SNAKE_CASE_ ) ) def a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Sequence[float] ): """simple docstring""" UpperCamelCase : Optional[int] = [] for minterm in minterms: UpperCamelCase : List[Any] = '''''' for _ in range(SCREAMING_SNAKE_CASE_ ): UpperCamelCase : int = str(minterm % 2 ) + string minterm //= 2 temp.append(SCREAMING_SNAKE_CASE_ ) return temp def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int ): """simple docstring""" UpperCamelCase : List[Any] = list(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = list(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = 0 for i in range(len(SCREAMING_SNAKE_CASE_ ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def a ( SCREAMING_SNAKE_CASE_ : list[list[int]] , SCREAMING_SNAKE_CASE_ : list[str] ): """simple docstring""" UpperCamelCase : List[Any] = [] UpperCamelCase : Dict = [0] * len(SCREAMING_SNAKE_CASE_ ) for i in range(len(chart[0] ) ): UpperCamelCase : Dict = 0 UpperCamelCase : Optional[int] = -1 for j in range(len(SCREAMING_SNAKE_CASE_ ) ): if chart[j][i] == 1: count += 1 UpperCamelCase : Tuple = j if count == 1: UpperCamelCase : Union[str, Any] = 1 for i in range(len(SCREAMING_SNAKE_CASE_ ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(SCREAMING_SNAKE_CASE_ ) ): UpperCamelCase : Tuple = 0 temp.append(prime_implicants[i] ) while True: UpperCamelCase : Tuple = 0 UpperCamelCase : Union[str, Any] = -1 UpperCamelCase : List[str] = 0 for i in range(len(SCREAMING_SNAKE_CASE_ ) ): UpperCamelCase : int = chart[i].count(1 ) if count_n > max_n: UpperCamelCase : Optional[Any] = count_n UpperCamelCase : List[str] = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(SCREAMING_SNAKE_CASE_ ) ): UpperCamelCase : int = 0 def a ( SCREAMING_SNAKE_CASE_ : list[str] , SCREAMING_SNAKE_CASE_ : list[str] ): """simple docstring""" UpperCamelCase : List[Any] = [[0 for x in range(len(SCREAMING_SNAKE_CASE_ ) )] for x in range(len(SCREAMING_SNAKE_CASE_ ) )] for i in range(len(SCREAMING_SNAKE_CASE_ ) ): UpperCamelCase : Any = prime_implicants[i].count('''_''' ) for j in range(len(SCREAMING_SNAKE_CASE_ ) ): if is_for_table(prime_implicants[i] , binary[j] , SCREAMING_SNAKE_CASE_ ): 
UpperCamelCase : Union[str, Any] = 1 return chart def a ( ): """simple docstring""" UpperCamelCase : List[str] = int(input('''Enter the no. of variables\n''' ) ) UpperCamelCase : int = [ float(SCREAMING_SNAKE_CASE_ ) for x in input( '''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split() ] UpperCamelCase : Tuple = decimal_to_binary(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = check(SCREAMING_SNAKE_CASE_ ) print('''Prime Implicants are:''' ) print(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = prime_implicant_chart(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = selection(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) print('''Essential Prime Implicants are:''' ) print(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": import doctest doctest.testmod() main()
315
import glob import os import random from string import ascii_lowercase, digits import cva __UpperCAmelCase : Optional[int] = "" __UpperCAmelCase : Union[str, Any] = "" __UpperCAmelCase : Optional[int] = "" __UpperCAmelCase : Any = 1 # (0 is vertical, 1 is horizontal) def a ( ): """simple docstring""" UpperCamelCase , UpperCamelCase : List[Any] = get_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) print('''Processing...''' ) UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = update_image_and_anno(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for index, image in enumerate(SCREAMING_SNAKE_CASE_ ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' UpperCamelCase : Optional[int] = random_chars(3_2 ) UpperCamelCase : List[Any] = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0] UpperCamelCase : int = F"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}""" cva.imwrite(F"""/{file_root}.jpg""" , SCREAMING_SNAKE_CASE_ , [cva.IMWRITE_JPEG_QUALITY, 8_5] ) print(F"""Success {index+1}/{len(SCREAMING_SNAKE_CASE_ )} with {file_name}""" ) UpperCamelCase : Any = [] for anno in new_annos[index]: UpperCamelCase : Tuple = F"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}""" annos_list.append(SCREAMING_SNAKE_CASE_ ) with open(F"""/{file_root}.txt""" , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ): """simple docstring""" UpperCamelCase : Any = [] UpperCamelCase : Union[str, Any] = [] for label_file in glob.glob(os.path.join(SCREAMING_SNAKE_CASE_ , '''*.txt''' ) ): UpperCamelCase : Optional[Any] = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(SCREAMING_SNAKE_CASE_ ) as in_file: UpperCamelCase : List[str] = in_file.readlines() UpperCamelCase : Tuple = os.path.join(SCREAMING_SNAKE_CASE_ , F"""{label_name}.jpg""" ) UpperCamelCase : Union[str, Any] = [] for obj_list in obj_lists: UpperCamelCase : str = obj_list.rstrip('''\n''' ).split(''' ''' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(SCREAMING_SNAKE_CASE_ ) labels.append(SCREAMING_SNAKE_CASE_ ) return img_paths, labels def a ( SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : int = 1 ): """simple docstring""" UpperCamelCase : List[Any] = [] UpperCamelCase : str = [] UpperCamelCase : int = [] for idx in range(len(SCREAMING_SNAKE_CASE_ ) ): UpperCamelCase : Tuple = [] UpperCamelCase : Optional[int] = img_list[idx] path_list.append(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = anno_list[idx] UpperCamelCase : Optional[Any] = cva.imread(SCREAMING_SNAKE_CASE_ ) if flip_type == 1: UpperCamelCase : Optional[Any] = cva.flip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for bbox in img_annos: UpperCamelCase : Optional[Any] = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: UpperCamelCase : List[str] = cva.flip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for bbox in img_annos: UpperCamelCase : Union[str, Any] = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(SCREAMING_SNAKE_CASE_ ) new_imgs_list.append(SCREAMING_SNAKE_CASE_ ) return new_imgs_list, new_annos_lists, path_list def a ( SCREAMING_SNAKE_CASE_ : int = 3_2 ): """simple docstring""" assert number_char > 1, "The number of character should greater than 1" UpperCamelCase : Any = 
ascii_lowercase + digits return "".join(random.choice(SCREAMING_SNAKE_CASE_ ) for _ in range(SCREAMING_SNAKE_CASE_ ) ) if __name__ == "__main__": main() print("DONE ✅")
315
1
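# Note on the flip-augmentation record above: YOLO-format annotations store
# normalised box centres, so a horizontal flip maps x_centre -> 1 - x_centre and a
# vertical flip maps y_centre -> 1 - y_centre while width/height are unchanged.
# Minimal sketch (box = [class_id, x, y, w, h], all coordinates in 0..1):
def flip_box(box: list[float], horizontal: bool) -> list[float]:
    cls, x, y, w, h = box
    return [cls, 1 - x, y, w, h] if horizontal else [cls, x, 1 - y, w, h]

print(flip_box([0, 0.25, 0.40, 0.10, 0.20], horizontal=True))
# [0, 0.75, 0.4, 0.1, 0.2]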
import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : str=None , ): """simple docstring""" if attention_mask is None: UpperCamelCase : Dict = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: UpperCamelCase : List[str] = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: UpperCamelCase : int = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=SCREAMING_SNAKE_CASE_ ) if decoder_head_mask is None: UpperCamelCase : Any = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=SCREAMING_SNAKE_CASE_ ) if cross_attn_head_mask is None: UpperCamelCase : int = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=SCREAMING_SNAKE_CASE_ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class UpperCAmelCase_ : '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=20 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , ): """simple docstring""" UpperCamelCase : int = parent UpperCamelCase : int = batch_size UpperCamelCase : int = seq_length UpperCamelCase : Optional[int] = is_training UpperCamelCase : Union[str, Any] = use_labels UpperCamelCase : str = vocab_size UpperCamelCase : List[Any] = hidden_size UpperCamelCase : Tuple = num_hidden_layers UpperCamelCase : Tuple = num_attention_heads UpperCamelCase : Dict = intermediate_size UpperCamelCase : int = hidden_act UpperCamelCase : Any = hidden_dropout_prob UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob UpperCamelCase : int = encoder_layerdrop UpperCamelCase : Tuple = decoder_layerdrop UpperCamelCase : int = max_position_embeddings UpperCamelCase : List[str] = eos_token_id UpperCamelCase : Any = pad_token_id UpperCamelCase : Union[str, Any] = bos_token_id def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase : Union[str, Any] = self.eos_token_id # 
Eos Token UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input UpperCamelCase : int = input_ids.clamp(self.pad_token_id + 1 ) UpperCamelCase : Tuple = decoder_input_ids.clamp(self.pad_token_id + 1 ) UpperCamelCase : Optional[int] = self.get_config() UpperCamelCase : Optional[int] = prepare_mam_aaa_inputs_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return config, inputs_dict def _lowercase ( self ): """simple docstring""" return MaMaaaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase , UpperCamelCase : List[Any] = self.prepare_config_and_inputs() return config, inputs_dict def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[Any] = MaMaaaModel(config=__SCREAMING_SNAKE_CASE ).get_decoder().to(__SCREAMING_SNAKE_CASE ).eval() UpperCamelCase : Union[str, Any] = inputs_dict['''input_ids'''] UpperCamelCase : str = inputs_dict['''attention_mask'''] UpperCamelCase : int = inputs_dict['''head_mask'''] # first forward pass UpperCamelCase : List[str] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE ) UpperCamelCase , UpperCamelCase : List[Any] = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids UpperCamelCase : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase : List[Any] = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and UpperCamelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase : int = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) UpperCamelCase : str = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )['''last_hidden_state'''] UpperCamelCase : int = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE )[ '''last_hidden_state''' ] # select random slice UpperCamelCase : str = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase : int = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCamelCase : List[Any] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice 
self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-2 ) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Dict = MaMaaaModel(config=__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE ).eval() UpperCamelCase : List[Any] = model(**__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = outputs.encoder_last_hidden_state UpperCamelCase : Optional[Any] = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase : Dict = model.get_encoder() encoder.save_pretrained(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = MaMaaaEncoder.from_pretrained(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = encoder(inputs_dict['''input_ids'''] , attention_mask=inputs_dict['''attention_mask'''] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase : Union[str, Any] = model.get_decoder() decoder.save_pretrained(__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = MaMaaaDecoder.from_pretrained(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = decoder( input_ids=inputs_dict['''decoder_input_ids'''] , attention_mask=inputs_dict['''decoder_attention_mask'''] , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=inputs_dict['''attention_mask'''] , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class UpperCAmelCase_ ( _a, _a, _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : Optional[int] = ( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) __UpperCamelCase : str = (MaMaaaForConditionalGeneration,) if is_torch_available() else () __UpperCamelCase : List[str] = ( { "conversational": MaMaaaForConditionalGeneration, "feature-extraction": MaMaaaModel, "summarization": MaMaaaForConditionalGeneration, "text2text-generation": MaMaaaForConditionalGeneration, "translation": MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) __UpperCamelCase : List[str] = True __UpperCamelCase : List[str] = True __UpperCamelCase : List[str] = False __UpperCamelCase : Optional[int] = False def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. 
return True return False def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = MaMaaaModelTester(self ) UpperCamelCase : Union[str, Any] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" self.config_tester.run_common_tests() def _lowercase ( self ): """simple docstring""" UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: UpperCamelCase : Any = model_class(__SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__SCREAMING_SNAKE_CASE ) UpperCamelCase , UpperCamelCase : Union[str, Any] = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , output_loading_info=__SCREAMING_SNAKE_CASE ) self.assertEqual(info['''missing_keys'''] , [] ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): UpperCamelCase : str = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : int = copy.deepcopy(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) if not self.is_encoder_decoder: UpperCamelCase : Tuple = inputs['''input_ids'''] del inputs["input_ids"] else: UpperCamelCase : Union[str, Any] = inputs['''input_ids'''] UpperCamelCase : List[Any] = inputs.get('''decoder_input_ids''' , __SCREAMING_SNAKE_CASE ) del inputs["input_ids"] inputs.pop('''decoder_input_ids''' , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = model.get_input_embeddings() if not self.is_encoder_decoder: UpperCamelCase : int = wte(__SCREAMING_SNAKE_CASE ) else: UpperCamelCase : Any = wte(__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = wte(__SCREAMING_SNAKE_CASE ) with torch.no_grad(): model(**__SCREAMING_SNAKE_CASE )[0] def _lowercase ( self ): """simple docstring""" UpperCamelCase , UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() UpperCamelCase : List[Any] = input_dict['''input_ids'''] UpperCamelCase : Dict = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = MaMaaaForConditionalGeneration(__SCREAMING_SNAKE_CASE ).eval().to(__SCREAMING_SNAKE_CASE ) if torch_device == "cuda": model.half() model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ) model.generate(num_beams=4 , do_sample=__SCREAMING_SNAKE_CASE , early_stopping=__SCREAMING_SNAKE_CASE , num_return_sequences=3 ) def a ( SCREAMING_SNAKE_CASE_ : str ): """simple docstring""" return torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) __UpperCAmelCase : Any = 1E-4 @require_torch @require_sentencepiece @require_tokenizers @slow class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' @cached_property def _lowercase ( self ): """simple docstring""" return MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = 
MaMaaaModel.from_pretrained('''facebook/m2m100_418M''' ).to(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] ) UpperCamelCase : str = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] ) UpperCamelCase : Tuple = prepare_mam_aaa_inputs_dict(model.config , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) with torch.no_grad(): UpperCamelCase : Tuple = model(**__SCREAMING_SNAKE_CASE )[0] UpperCamelCase : Dict = torch.Size((1, 11, 1_024) ) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) # change to expected output here UpperCamelCase : Union[str, Any] = torch.tensor( [[-0.7_780, -0.1_676, 0.1_038], [-6.7_556, -1.3_992, 0.0_567], [-7.5_383, -0.5_920, -0.2_779]] , device=__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(__SCREAMING_SNAKE_CASE ) # change to intended input UpperCamelCase : int = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] ) UpperCamelCase : List[str] = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] ) UpperCamelCase : List[Any] = prepare_mam_aaa_inputs_dict(model.config , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) with torch.no_grad(): UpperCamelCase : Optional[int] = model(**__SCREAMING_SNAKE_CASE )[0] UpperCamelCase : str = torch.Size((1, 11, model.config.vocab_size) ) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) # change to expected output here UpperCamelCase : Optional[Any] = torch.tensor( [[-1.0_448, -1.0_411, 3.7_992], [-3.2_191, -3.2_386, -1.3_451], [-3.6_210, -3.5_993, 0.4_925]] , device=__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' , src_lang='''fr''' , tgt_lang='''en''' ) UpperCamelCase : List[str] = [ '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''', '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''', '''Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent''' ''' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de''' ''' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.''', ] # The below article tests that we don't add any hypotheses outside of the top n_beams UpperCamelCase : List[str] = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) UpperCamelCase : Tuple = model.generate( input_ids=dct['''input_ids'''].to(__SCREAMING_SNAKE_CASE ) , attention_mask=dct['''attention_mask'''].to(__SCREAMING_SNAKE_CASE ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('''en''' ) , ) UpperCamelCase : Union[str, Any] = [ '''The NSA case highlights the total absence of intelligence debate''', '''I think there are two levels of response from the French government.''', '''When François Hollande calls Barack Obama or when Foreign Minister 
Laurent Fabius calls the U.S.''' ''' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all''' ''' communications in France.''', ] UpperCamelCase : List[str] = tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) assert generated == expected_en
315
import qiskit def a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ): """simple docstring""" UpperCamelCase : List[str] = qiskit.Aer.get_backend('''aer_simulator''' ) UpperCamelCase : Any = qiskit.QuantumCircuit(4 , 2 ) # encode inputs in qubits 0 and 1 if bita == 1: qc_ha.x(0 ) if bita == 1: qc_ha.x(1 ) qc_ha.barrier() # use cnots to write XOR of the inputs on qubit2 qc_ha.cx(0 , 2 ) qc_ha.cx(1 , 2 ) # use ccx / toffoli gate to write AND of the inputs on qubit3 qc_ha.ccx(0 , 1 , 3 ) qc_ha.barrier() # extract outputs qc_ha.measure(2 , 0 ) # extract XOR value qc_ha.measure(3 , 1 ) # extract AND value # Execute the circuit on the qasm simulator UpperCamelCase : Any = qiskit.execute(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , shots=1_0_0_0 ) # Return the histogram data of the results of the experiment return job.result().get_counts(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": __UpperCAmelCase : int = half_adder(1, 1) print(f'''Half Adder Output Qubit Counts: {counts}''')
315
1
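# Note on the quantum half adder in the record above: the two CNOTs leave XOR(a, b)
# on qubit 2 (the sum bit) and the Toffoli leaves AND(a, b) on qubit 3 (the carry
# bit). Classical reference table the measured counts should reproduce:
for a in (0, 1):
    for b in (0, 1):
        print(f"{a} + {b} -> sum={a ^ b}, carry={a & b}")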
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase : Dict = { "configuration_xlm_roberta_xl": [ "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMRobertaXLConfig", "XLMRobertaXLOnnxConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : Optional[int] = [ "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMRobertaXLForCausalLM", "XLMRobertaXLForMaskedLM", "XLMRobertaXLForMultipleChoice", "XLMRobertaXLForQuestionAnswering", "XLMRobertaXLForSequenceClassification", "XLMRobertaXLForTokenClassification", "XLMRobertaXLModel", "XLMRobertaXLPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys __UpperCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure)
315
import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging __UpperCAmelCase : str = logging.get_logger(__name__) def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] ): """simple docstring""" UpperCamelCase : Union[str, Any] = nn.functional.normalize(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = nn.functional.normalize(SCREAMING_SNAKE_CASE_ ) return torch.mm(SCREAMING_SNAKE_CASE_ , normalized_text_embeds.t() ) class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : List[str] = CLIPConfig __UpperCamelCase : Optional[int] = ["CLIPEncoderLayer"] def __init__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" super().__init__(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = CLIPVisionModel(config.vision_config ) UpperCamelCase : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = nn.Parameter(torch.ones(17 ) , requires_grad=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = nn.Parameter(torch.ones(3 ) , requires_grad=__SCREAMING_SNAKE_CASE ) @torch.no_grad() def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Tuple = self.vision_model(__SCREAMING_SNAKE_CASE )[1] # pooled_output UpperCamelCase : Union[str, Any] = self.visual_projection(__SCREAMING_SNAKE_CASE ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCamelCase : Optional[int] = cosine_distance(__SCREAMING_SNAKE_CASE , self.special_care_embeds ).cpu().float().numpy() UpperCamelCase : List[Any] = cosine_distance(__SCREAMING_SNAKE_CASE , self.concept_embeds ).cpu().float().numpy() UpperCamelCase : Dict = [] UpperCamelCase : List[str] = image_embeds.shape[0] for i in range(__SCREAMING_SNAKE_CASE ): UpperCamelCase : Optional[Any] = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images UpperCamelCase : Optional[int] = 0.0 for concept_idx in range(len(special_cos_dist[0] ) ): UpperCamelCase : List[str] = special_cos_dist[i][concept_idx] UpperCamelCase : Optional[Any] = self.special_care_embeds_weights[concept_idx].item() UpperCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} ) UpperCamelCase : Optional[int] = 0.01 for concept_idx in range(len(cos_dist[0] ) ): UpperCamelCase : Optional[int] = cos_dist[i][concept_idx] UpperCamelCase : List[str] = self.concept_embeds_weights[concept_idx].item() UpperCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(__SCREAMING_SNAKE_CASE ) result.append(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = [len(res['''bad_concepts'''] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): 
"""simple docstring""" UpperCamelCase : Any = self.vision_model(__SCREAMING_SNAKE_CASE )[1] # pooled_output UpperCamelCase : int = self.visual_projection(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = cosine_distance(__SCREAMING_SNAKE_CASE , self.special_care_embeds ) UpperCamelCase : str = cosine_distance(__SCREAMING_SNAKE_CASE , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images UpperCamelCase : Union[str, Any] = 0.0 UpperCamelCase : Optional[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) UpperCamelCase : Optional[Any] = torch.any(special_scores > 0 , dim=1 ) UpperCamelCase : int = special_care * 0.01 UpperCamelCase : Tuple = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) UpperCamelCase : Optional[int] = (cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) UpperCamelCase : List[str] = torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
315
1
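# Note on the safety-checker record above: concept scores come from cosine
# similarity between L2-normalised image and concept embeddings. Hedged numpy
# sketch of that distance function (shapes and test values are illustrative):
import numpy as np

def cosine_sim(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    a = a / np.linalg.norm(a, axis=-1, keepdims=True)
    b = b / np.linalg.norm(b, axis=-1, keepdims=True)
    return a @ b.T

x = np.array([[1.0, 0.0], [0.0, 1.0]])
print(cosine_sim(x, x))  # identity: orthogonal unit rows have similarity 0, self 1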
import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu __UpperCAmelCase : Any = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json" with io.open(filename, "r", encoding="utf-8") as f: __UpperCAmelCase : List[Any] = json.load(f) @require_torch class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" return FSMTTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : List[Any] = FSMTForConditionalGeneration.from_pretrained(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ['''en-ru''', 26.0], ['''ru-en''', 22.0], ['''en-de''', 22.0], ['''de-en''', 29.0], ] ) @slow def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : List[Any] = f"""facebook/wmt19-{pair}""" UpperCamelCase : str = self.get_tokenizer(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = self.get_model(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = bleu_data[pair]['''src'''] UpperCamelCase : Optional[Any] = bleu_data[pair]['''tgt'''] UpperCamelCase : Dict = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , truncation=__SCREAMING_SNAKE_CASE , padding='''longest''' ).to(__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = model.generate( input_ids=batch.input_ids , num_beams=8 , ) UpperCamelCase : Tuple = tokenizer.batch_decode( __SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = calculate_bleu(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) print(__SCREAMING_SNAKE_CASE ) self.assertGreaterEqual(scores['''bleu'''] , __SCREAMING_SNAKE_CASE )
315
from argparse import ArgumentParser from .env import EnvironmentCommand def a ( ): """simple docstring""" UpperCamelCase : Any = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' ) UpperCamelCase : Tuple = parser.add_subparsers(help='''diffusers-cli command helpers''' ) # Register commands EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE_ ) # Let's go UpperCamelCase : List[Any] = parser.parse_args() if not hasattr(SCREAMING_SNAKE_CASE_ , '''func''' ): parser.print_help() exit(1 ) # Run UpperCamelCase : str = args.func(SCREAMING_SNAKE_CASE_ ) service.run() if __name__ == "__main__": main()
315
1
import argparse import torch from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] ): """simple docstring""" if gpta_config_file == "": UpperCamelCase : int = GPTaConfig() else: UpperCamelCase : Any = GPTaConfig.from_json_file(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = GPTaModel(SCREAMING_SNAKE_CASE_ ) # Load weights from numpy load_tf_weights_in_gpta(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save pytorch-model UpperCamelCase : Optional[Any] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME UpperCamelCase : Union[str, Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" ) torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ ) print(F"""Save configuration file to {pytorch_config_dump_path}""" ) with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __UpperCAmelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--gpt2_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained OpenAI model. \n" "This specifies the model architecture." ), ) __UpperCAmelCase : int = parser.parse_args() convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
315
def a ( SCREAMING_SNAKE_CASE_ : str ): """simple docstring""" return "".join(chr(ord(char ) - 3_2 ) if '''a''' <= char <= '''z''' else char for char in SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": from doctest import testmod testmod()
315
1
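# Note on the case-conversion snippet in the record above: ASCII uppercase is the
# lowercase code point minus 32 (ord('a') == 97, ord('A') == 65). Quick self-check:
assert "".join(chr(ord(c) - 32) if "a" <= c <= "z" else c for c in "hello World!") == "HELLO WORLD!"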
import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch __UpperCAmelCase : Optional[int] = random.Random() def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any=1.0 , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : Optional[int]=None ): """simple docstring""" if rng is None: UpperCamelCase : Optional[Any] = global_rng UpperCamelCase : Tuple = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=2_000 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=160 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=4_000 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , ): """simple docstring""" UpperCamelCase : Any = parent UpperCamelCase : Optional[int] = batch_size UpperCamelCase : int = min_seq_length UpperCamelCase : Tuple = max_seq_length UpperCamelCase : str = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCamelCase : Optional[Any] = padding_value UpperCamelCase : int = sampling_rate UpperCamelCase : str = return_attention_mask UpperCamelCase : List[Any] = do_normalize UpperCamelCase : List[Any] = feature_size UpperCamelCase : Optional[Any] = chunk_length UpperCamelCase : int = hop_length def _lowercase ( self ): """simple docstring""" return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def _lowercase ( self , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False ): """simple docstring""" def _flatten(__SCREAMING_SNAKE_CASE ): return list(itertools.chain(*__SCREAMING_SNAKE_CASE ) ) if equal_length: UpperCamelCase : List[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size UpperCamelCase : Any = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: UpperCamelCase : Dict = [np.asarray(__SCREAMING_SNAKE_CASE ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCAmelCase_ ( _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : str = WhisperFeatureExtractor if is_speech_available() else None def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = WhisperFeatureExtractionTester(self ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase : Any = feat_extract_first.save_pretrained(__SCREAMING_SNAKE_CASE 
)[0] check_json_file_has_correct_format(__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = self.feature_extraction_class.from_pretrained(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = feat_extract_first.to_dict() UpperCamelCase : Optional[Any] = feat_extract_second.to_dict() UpperCamelCase : str = feat_extract_first.mel_filters UpperCamelCase : List[str] = feat_extract_second.mel_filters self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase : Any = os.path.join(__SCREAMING_SNAKE_CASE , '''feat_extract.json''' ) feat_extract_first.to_json_file(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = self.feature_extraction_class.from_json_file(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = feat_extract_first.to_dict() UpperCamelCase : Dict = feat_extract_second.to_dict() UpperCamelCase : Optional[Any] = feat_extract_first.mel_filters UpperCamelCase : Tuple = feat_extract_second.mel_filters self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 UpperCamelCase : int = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] UpperCamelCase : Any = [np.asarray(__SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs] # Test feature size UpperCamelCase : Optional[Any] = feature_extractor(__SCREAMING_SNAKE_CASE , padding='''max_length''' , return_tensors='''np''' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input UpperCamelCase : Optional[int] = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features UpperCamelCase : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # Test batched UpperCamelCase : List[str] = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' ).input_features UpperCamelCase : Optional[int] = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
UpperCamelCase : List[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)] UpperCamelCase : List[Any] = np.asarray(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' ).input_features UpperCamelCase : Any = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # Test truncation required UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )] UpperCamelCase : Union[str, Any] = [np.asarray(__SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs] UpperCamelCase : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs] UpperCamelCase : Optional[int] = [np.asarray(__SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs_truncated] UpperCamelCase : Any = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' ).input_features UpperCamelCase : Tuple = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) ) def _lowercase ( self ): """simple docstring""" import torch UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Optional[Any] = np.random.rand(100 , 32 ).astype(np.floataa ) UpperCamelCase : Optional[Any] = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: UpperCamelCase : Dict = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) UpperCamelCase : Any = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : str = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech UpperCamelCase : int = ds.sort('''id''' ).select(range(__SCREAMING_SNAKE_CASE ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = torch.tensor( [ 0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951, 0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678, 0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554, -0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854 ] ) # fmt: on UpperCamelCase : Tuple = self._load_datasamples(1 ) UpperCamelCase : List[str] = WhisperFeatureExtractor() UpperCamelCase : Union[str, Any] = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).input_features self.assertEqual(input_features.shape , (1, 80, 3_000) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : Any = self._load_datasamples(1 )[0] UpperCamelCase : Dict = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 
# Rescale to [0, 65535] to show issue UpperCamelCase : List[Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__SCREAMING_SNAKE_CASE )[0] self.assertTrue(np.all(np.mean(__SCREAMING_SNAKE_CASE ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(__SCREAMING_SNAKE_CASE ) - 1 ) < 1e-3 ) )
315
import math def a ( SCREAMING_SNAKE_CASE_ : int ): """simple docstring""" assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or not number % 2: # Negatives, 0, 1 and all even numbers are not primes return False UpperCamelCase : Union[str, Any] = range(3 , int(math.sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 2 ) return not any(not number % i for i in odd_numbers ) def a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple=1 , **SCREAMING_SNAKE_CASE_ : Tuple ): """simple docstring""" UpperCamelCase : Tuple = factor * value UpperCamelCase : Optional[int] = value while not is_prime(SCREAMING_SNAKE_CASE_ ): value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1 if value == first_value_val: return next_prime(value + 1 , **SCREAMING_SNAKE_CASE_ ) return value
315
1
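# Note on the primality record above: it trial-divides by odd numbers up to
# sqrt(n) after screening out n < 2, the primes 2 and 3, and even numbers.
# Equivalent hedged sketch of that check:
import math

def is_prime(n: int) -> bool:
    if n < 2:
        return False
    if n < 4:
        return True  # 2 and 3
    if n % 2 == 0:
        return False
    return all(n % i for i in range(3, int(math.sqrt(n)) + 1, 2))

print([p for p in range(20) if is_prime(p)])  # [2, 3, 5, 7, 11, 13, 17, 19]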
import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[Any] = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) UpperCamelCase : Optional[int] = Vector() def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(__SCREAMING_SNAKE_CASE ) , '''(0,0,0,0,0,1)''' ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = Vector([1, 2, 3, 4] ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 4 ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = Vector([1, 2] ) UpperCamelCase : Union[str, Any] = Vector([1, 2, 3, 4, 5] ) UpperCamelCase : Dict = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) UpperCamelCase : Optional[Any] = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = Vector([1, 2, 3] ) UpperCamelCase : Union[str, Any] = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = Vector([1, 2, 3] ) UpperCamelCase : List[Any] = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = Vector([1, 2, 3] ) UpperCamelCase : str = Vector([2, -1, 4] ) # for test of dot product UpperCamelCase : Union[str, Any] = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , '''(3.0,6.0,9.0)''' ) self.assertEqual((a * b) , 0 ) def _lowercase ( self ): """simple docstring""" self.assertEqual(str(zero_vector(10 ) ).count('''0''' ) , 10 ) def _lowercase ( self ): """simple docstring""" self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '''(0,1,0)''' ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Tuple = Vector([1, 2, 3] ) UpperCamelCase : int = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) , '''(3,4,7)''' ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Tuple = Vector([1, 0, 0, 0, 0, 0] ) UpperCamelCase : Tuple = x.copy() self.assertEqual(str(__SCREAMING_SNAKE_CASE ) , str(__SCREAMING_SNAKE_CASE ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(__SCREAMING_SNAKE_CASE ) , '''(0,1,0)''' ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(__SCREAMING_SNAKE_CASE ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) UpperCamelCase : Any = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) def 
_lowercase ( self ): """simple docstring""" UpperCamelCase : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) UpperCamelCase : Union[str, Any] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) UpperCamelCase : Optional[Any] = Vector([1, 2, 3] ) self.assertEqual('''(14,32,50)''' , str(a * x ) ) self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2 ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a.change_component(0 , 2 , 5 ) self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(__SCREAMING_SNAKE_CASE ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) UpperCamelCase : int = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) UpperCamelCase : List[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b ) ) def _lowercase ( self ): """simple docstring""" self.assertEqual( '''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
315
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
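# Illustrative check (added; not part of the original module, and it only runs
# inside a transformers checkout where the relative imports resolve):
# constructing the deprecated shim should emit a FutureWarning while otherwise
# behaving exactly like ImageGPTImageProcessor.
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         ImageGPTFeatureExtractor()
#     assert any(issubclass(w.category, FutureWarning) for w in caught)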
315
1
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort a list in place by exchanging any pair found out of order."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
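# Illustrative usage (added; not part of the original script): the function
# mutates its argument and also returns it, so it composes with print().
# >>> exchange_sort([5, 4, 3, 2, 1])
# [1, 2, 3, 4, 5]
# >>> exchange_sort([-91, 0, 123])
# [-91, 0, 123]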
315
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=18 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , ): """simple docstring""" UpperCamelCase : List[str] = size if size is not None else {'''height''': 18, '''width''': 18} UpperCamelCase : int = parent UpperCamelCase : List[Any] = batch_size UpperCamelCase : Optional[int] = num_channels UpperCamelCase : Union[str, Any] = image_size UpperCamelCase : Union[str, Any] = min_resolution UpperCamelCase : Tuple = max_resolution UpperCamelCase : List[str] = do_resize UpperCamelCase : List[str] = size UpperCamelCase : int = apply_ocr def _lowercase ( self ): """simple docstring""" return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class UpperCAmelCase_ ( _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = LayoutLMvaImageProcessingTester(self ) @property def _lowercase ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''apply_ocr''' ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} ) UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) def _lowercase ( self ): """simple docstring""" pass def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input UpperCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) self.assertIsInstance(encoding.words , __SCREAMING_SNAKE_CASE ) self.assertIsInstance(encoding.boxes , __SCREAMING_SNAKE_CASE ) # Test batched UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values 
self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input UpperCamelCase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched UpperCamelCase : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input UpperCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched UpperCamelCase : Optional[int] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = LayoutLMvaImageProcessor() from datasets import load_dataset UpperCamelCase : Dict = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' ) UpperCamelCase : List[Any] = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 UpperCamelCase : Union[str, Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', 
'''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 UpperCamelCase : str = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 
391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , __SCREAMING_SNAKE_CASE ) self.assertListEqual(encoding.boxes , __SCREAMING_SNAKE_CASE ) # with apply_OCR = False UpperCamelCase : Optional[Any] = LayoutLMvaImageProcessor(apply_ocr=__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
315
1
from __future__ import annotations class UpperCAmelCase_ : '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Dict = TypeError( '''Matrices must be formed from a list of zero or more lists containing at ''' '''least one and the same number of values, each of which must be of type ''' '''int or float.''' ) if len(__SCREAMING_SNAKE_CASE ) != 0: UpperCamelCase : Tuple = len(rows[0] ) if cols == 0: raise error for row in rows: if len(__SCREAMING_SNAKE_CASE ) != cols: raise error for value in row: if not isinstance(__SCREAMING_SNAKE_CASE , (int, float) ): raise error UpperCamelCase : Optional[int] = rows else: UpperCamelCase : Optional[Any] = [] def _lowercase ( self ): """simple docstring""" return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )] @property def _lowercase ( self ): """simple docstring""" return len(self.rows ) @property def _lowercase ( self ): """simple docstring""" return len(self.rows[0] ) @property def _lowercase ( self ): """simple docstring""" return (self.num_rows, self.num_columns) @property def _lowercase ( self ): """simple docstring""" return self.order[0] == self.order[1] def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows )] for row_num in range(self.num_rows ) ] return Matrix(__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0] ) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0]) ) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns ) ) def _lowercase ( self ): """simple docstring""" return bool(self.determinant() ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Any = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns ) if other_column != column ] for other_row in range(self.num_rows ) if other_row != row ] return Matrix(__SCREAMING_SNAKE_CASE ).determinant() def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" if (row + column) % 2 == 0: return self.get_minor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return -1 * self.get_minor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" return Matrix( [ [self.get_minor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for column in range(self.num_columns )] for row in range(self.num_rows ) ] ) def _lowercase ( self ): """simple docstring""" return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns ) ] for row in range(self.minors().num_rows ) ] ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = [ [self.cofactors().rows[column][row] for column in range(self.num_columns )] for row in range(self.num_rows ) ] return Matrix(__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = self.determinant() if not determinant: raise TypeError('''Only matrices with a non-zero determinant have an inverse''' ) return self.adjugate() * (1 / determinant) def __repr__( self ): """simple docstring""" return str(self.rows ) def __str__( self ): 
"""simple docstring""" if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0] ) ) + "]]" return ( "[" + "\n ".join( [ '''[''' + '''. '''.join([str(__SCREAMING_SNAKE_CASE ) for value in row] ) + '''.]''' for row in self.rows ] ) + "]" ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" UpperCamelCase : str = TypeError('''Row must be a list containing all ints and/or floats''' ) if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise type_error for value in row: if not isinstance(__SCREAMING_SNAKE_CASE , (int, float) ): raise type_error if len(__SCREAMING_SNAKE_CASE ) != self.num_columns: raise ValueError( '''Row must be equal in length to the other rows in the matrix''' ) if position is None: self.rows.append(__SCREAMING_SNAKE_CASE ) else: UpperCamelCase : Union[str, Any] = self.rows[0:position] + [row] + self.rows[position:] def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" UpperCamelCase : Tuple = TypeError( '''Column must be a list containing all ints and/or floats''' ) if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise type_error for value in column: if not isinstance(__SCREAMING_SNAKE_CASE , (int, float) ): raise type_error if len(__SCREAMING_SNAKE_CASE ) != self.num_rows: raise ValueError( '''Column must be equal in length to the other columns in the matrix''' ) if position is None: UpperCamelCase : Optional[Any] = [self.rows[i] + [column[i]] for i in range(self.num_rows )] else: UpperCamelCase : Dict = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows ) ] def __eq__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): return NotImplemented return self.rows == other.rows def __ne__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" return not self == other def __neg__( self ): """simple docstring""" return self * -1 def __add__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if self.order != other.order: raise ValueError('''Addition requires matrices of the same order''' ) return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __sub__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if self.order != other.order: raise ValueError('''Subtraction requires matrices of the same order''' ) return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __mul__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if isinstance(__SCREAMING_SNAKE_CASE , (int, float) ): return Matrix( [[int(element * other ) for element in row] for row in self.rows] ) elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): if self.num_columns != other.num_rows: raise ValueError( '''The number of columns in the first matrix must ''' '''be equal to the number of rows in the second''' ) return Matrix( [ [Matrix.dot_product(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for column in other.columns()] for row in self.rows ] ) else: raise TypeError( '''A Matrix can only be multiplied by an int, float, or another matrix''' ) def __pow__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError('''A Matrix can only be raised to the power of an int''' ) 
if not self.is_square: raise ValueError('''Only square matrices can be raised to a power''' ) if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( '''Only invertable matrices can be raised to a negative power''' ) UpperCamelCase : List[str] = self for _ in range(other - 1 ): result *= self return result @classmethod def _lowercase ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" return sum(row[i] * column[i] for i in range(len(__SCREAMING_SNAKE_CASE ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
315
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    # Split the dataset bunch into its feature matrix and target vector
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load the California housing dataset and split it 75/25 into train/test
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
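# Illustrative follow-up (added; not in the original script): RMSE is a common
# companion metric and is simply the square root of the printed MSE, e.g.
#
#     rmse = float(np.sqrt(mean_squared_error(y_test, predictions)))
#
# where `y_test` and `predictions` are the local names used inside main().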
315
1
import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer __UpperCAmelCase : Tuple = logging.get_logger(__name__) __UpperCAmelCase : List[str] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} __UpperCAmelCase : int = { "vocab_file": { "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json", }, "merges_file": { "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt", }, "tokenizer_file": { "Salesforce/codegen-350M-mono": ( "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json" ), }, } __UpperCAmelCase : Tuple = { "Salesforce/codegen-350M-mono": 2048, } class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : Union[str, Any] = VOCAB_FILES_NAMES __UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Any = ["input_ids", "attention_mask"] __UpperCamelCase : Optional[Any] = CodeGenTokenizer def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<|endoftext|>" , __SCREAMING_SNAKE_CASE="<|endoftext|>" , __SCREAMING_SNAKE_CASE="<|endoftext|>" , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" super().__init__( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) if kwargs.pop('''add_bos_token''' , __SCREAMING_SNAKE_CASE ): UpperCamelCase : int = kwargs.pop('''name_or_path''' , '''''' ) raise ValueError( '''Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.''' '''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n''' f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n""" f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n""" '''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.''' ''' so that the fast tokenizer works correctly.''' ) UpperCamelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space: UpperCamelCase : Optional[Any] = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) ) UpperCamelCase : Any = add_prefix_space UpperCamelCase : Optional[int] = pre_tok_class(**__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = add_prefix_space def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : int = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." 
) return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Union[str, Any] = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" UpperCamelCase : Optional[int] = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE ) return tuple(__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" UpperCamelCase : Union[str, Any] = super().decode( token_ids=__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) if truncate_before_pattern is not None and len(__SCREAMING_SNAKE_CASE ) > 0: UpperCamelCase : Optional[int] = self.truncate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return decoded_text def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" def find_re(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): UpperCamelCase : Optional[int] = pattern.search(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return m.start() if m else -1 UpperCamelCase : List[str] = [re.compile(__SCREAMING_SNAKE_CASE , re.MULTILINE ) for pattern in truncate_before_pattern] UpperCamelCase : Optional[int] = list(re.finditer('''^print''' , __SCREAMING_SNAKE_CASE , re.MULTILINE ) ) if len(__SCREAMING_SNAKE_CASE ) > 1: UpperCamelCase : Dict = completion[: prints[1].start()] UpperCamelCase : Union[str, Any] = list(re.finditer('''^def''' , __SCREAMING_SNAKE_CASE , re.MULTILINE ) ) if len(__SCREAMING_SNAKE_CASE ) > 1: UpperCamelCase : Any = completion[: defs[1].start()] UpperCamelCase : Optional[Any] = 0 UpperCamelCase : Optional[int] = [ pos for pos in [find_re(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for terminal in terminals] if pos != -1 ] if len(__SCREAMING_SNAKE_CASE ) > 0: return completion[: min(__SCREAMING_SNAKE_CASE )] else: return completion
315
__UpperCAmelCase : str = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n" __UpperCAmelCase : Dict = [{"type": "code", "content": INSTALL_CONTENT}] __UpperCAmelCase : Union[str, Any] = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
315
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
315
import collections import os import re from pathlib import Path __UpperCAmelCase : List[str] = "src/transformers" # Matches is_xxx_available() __UpperCAmelCase : int = re.compile(r"is\_([a-z_]*)_available()") # Catches a one-line _import_struct = {xxx} __UpperCAmelCase : Optional[int] = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __UpperCAmelCase : List[Any] = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]") # Catches a line if not is_foo_available __UpperCAmelCase : List[Any] = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)") # Catches a line _import_struct["bla"].append("foo") __UpperCAmelCase : str = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __UpperCAmelCase : Union[str, Any] = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]") # Catches a line with an object between quotes and a comma: "MyModel", __UpperCAmelCase : Dict = re.compile(r"^\s+\"([^\"]+)\",") # Catches a line with objects between brackets only: ["foo", "bar"], __UpperCAmelCase : str = re.compile(r"^\s+\[([^\]]+)\]") # Catches a line with from foo import bar, bla, boo __UpperCAmelCase : str = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Catches a line with try: __UpperCAmelCase : Any = re.compile(r"^\s*try:") # Catches a line with else: __UpperCAmelCase : List[Any] = re.compile(r"^\s*else:") def a ( SCREAMING_SNAKE_CASE_ : Dict ): """simple docstring""" if _re_test_backend.search(SCREAMING_SNAKE_CASE_ ) is None: return None UpperCamelCase : Union[str, Any] = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE_ )] backends.sort() return "_and_".join(SCREAMING_SNAKE_CASE_ ) def a ( SCREAMING_SNAKE_CASE_ : Tuple ): """simple docstring""" with open(SCREAMING_SNAKE_CASE_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: UpperCamelCase : Tuple = f.readlines() UpperCamelCase : Tuple = 0 while line_index < len(SCREAMING_SNAKE_CASE_ ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(SCREAMING_SNAKE_CASE_ ): return None # First grab the objects without a specific backend in _import_structure UpperCamelCase : List[Any] = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: UpperCamelCase : Optional[int] = lines[line_index] # If we have everything on a single line, let's deal with it. 
if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Optional[int] = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ).groups()[0] UpperCamelCase : str = re.findall(R'''\[([^\]]+)\]''' , SCREAMING_SNAKE_CASE_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue UpperCamelCase : List[Any] = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE_ ) if single_line_import_search is not None: UpperCamelCase : List[str] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(SCREAMING_SNAKE_CASE_ ) > 0] objects.extend(SCREAMING_SNAKE_CASE_ ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 UpperCamelCase : Dict = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. UpperCamelCase : Dict = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: UpperCamelCase : Optional[Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 UpperCamelCase : str = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): UpperCamelCase : str = lines[line_index] if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ) is not None: objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ).groups()[0] ) elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ) is not None: UpperCamelCase : Union[str, Any] = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(''', ''' ) UpperCamelCase : List[Any] = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0] objects.extend(SCREAMING_SNAKE_CASE_ ) elif _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ) is not None: UpperCamelCase : str = _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(''', ''' ) UpperCamelCase : Dict = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0] objects.extend(SCREAMING_SNAKE_CASE_ ) elif _re_quote_object.search(SCREAMING_SNAKE_CASE_ ) is not None: objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE_ ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 1_2 + '''"''' ): objects.append(line[1_3:-3] ) line_index += 1 UpperCamelCase : Tuple = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend UpperCamelCase : int = [] while ( line_index < len(SCREAMING_SNAKE_CASE_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): UpperCamelCase : Tuple = lines[line_index] UpperCamelCase : Any = _re_import.search(SCREAMING_SNAKE_CASE_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 UpperCamelCase : Any = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(SCREAMING_SNAKE_CASE_ ): # If the line is an if is_backend_available, we grab all objects associated. 
UpperCamelCase : Optional[Any] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: UpperCamelCase : Dict = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 UpperCamelCase : Dict = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): UpperCamelCase : Optional[Any] = lines[line_index] UpperCamelCase : str = _re_import.search(SCREAMING_SNAKE_CASE_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 1_2 ): objects.append(line[1_2:-2] ) line_index += 1 UpperCamelCase : str = objects else: line_index += 1 return import_dict_objects, type_hint_objects def a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ): """simple docstring""" def find_duplicates(SCREAMING_SNAKE_CASE_ : Any ): return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] UpperCamelCase : Dict = [] for key in import_dict_objects.keys(): UpperCamelCase : Union[str, Any] = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) UpperCamelCase : Dict = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): UpperCamelCase : List[str] = '''base imports''' if key == '''none''' else F"""{key} backend""" errors.append(F"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def a ( ): """simple docstring""" UpperCamelCase : Any = [] for root, _, files in os.walk(SCREAMING_SNAKE_CASE_ ): if "__init__.py" in files: UpperCamelCase : int = os.path.join(SCREAMING_SNAKE_CASE_ , '''__init__.py''' ) UpperCamelCase : Optional[int] = parse_init(SCREAMING_SNAKE_CASE_ ) if objects is not None: UpperCamelCase : str = analyze_results(*SCREAMING_SNAKE_CASE_ ) if len(SCREAMING_SNAKE_CASE_ ) > 0: UpperCamelCase : List[Any] = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append('''\n'''.join(SCREAMING_SNAKE_CASE_ ) ) if len(SCREAMING_SNAKE_CASE_ ) > 0: raise ValueError('''\n\n'''.join(SCREAMING_SNAKE_CASE_ ) ) def a ( ): """simple docstring""" UpperCamelCase : Dict = [] for path, directories, files in os.walk(SCREAMING_SNAKE_CASE_ ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(SCREAMING_SNAKE_CASE_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(SCREAMING_SNAKE_CASE_ ) / folder).glob('''*.py''' ) ) ) == 0: continue UpperCamelCase : List[str] = str((Path(SCREAMING_SNAKE_CASE_ ) / folder).relative_to(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase : str = short_path.replace(os.path.sep , 
'''.''' ) submodules.append(SCREAMING_SNAKE_CASE_ ) for fname in files: if fname == "__init__.py": continue UpperCamelCase : Tuple = str((Path(SCREAMING_SNAKE_CASE_ ) / fname).relative_to(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase : int = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(SCREAMING_SNAKE_CASE_ ) return submodules __UpperCAmelCase : Optional[int] = [ "convert_pytorch_checkpoint_to_tf2", "modeling_flax_pytorch_utils", "models.esm.openfold_utils", ] def a ( ): """simple docstring""" from transformers.utils import direct_transformers_import UpperCamelCase : Tuple = direct_transformers_import(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(SCREAMING_SNAKE_CASE_ , '''__init__.py''' ) , '''r''' ) as f: UpperCamelCase : List[Any] = f.read() import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , SCREAMING_SNAKE_CASE_ ) ) ) UpperCamelCase : Union[str, Any] = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(SCREAMING_SNAKE_CASE_ ) > 0: UpperCamelCase : str = '''\n'''.join(F"""- {module}""" for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registed in the main init of Transformers:\n''' F"""{list_of_modules}\n""" '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
315
1
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Return a root of `function` on the interval [a, b], found by bisection."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif function(a) * function(b) > 0:
        # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
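# Illustrative usage (added; not part of the original module): bisection works
# for any continuous function whose signs differ at the interval endpoints.
# >>> round(bisection(lambda x: x**2 - 4, 0.0, 10.0), 3)
# 2.0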
315
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" UpperCamelCase : Any = set() # Replace all the whitespace in our sentence UpperCamelCase : Union[str, Any] = input_str.replace(''' ''' , '''''' ) for alpha in input_str: if "a" <= alpha.lower() <= "z": frequency.add(alpha.lower() ) return len(SCREAMING_SNAKE_CASE_ ) == 2_6 def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" UpperCamelCase : str = [False] * 2_6 for char in input_str: if char.islower(): UpperCamelCase : List[Any] = True elif char.isupper(): UpperCamelCase : List[Any] = True return all(SCREAMING_SNAKE_CASE_ ) def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6 def a ( ): """simple docstring""" from timeit import timeit UpperCamelCase : int = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest''' print(timeit('''is_pangram()''' , setup=SCREAMING_SNAKE_CASE_ ) ) print(timeit('''is_pangram_faster()''' , setup=SCREAMING_SNAKE_CASE_ ) ) print(timeit('''is_pangram_fastest()''' , setup=SCREAMING_SNAKE_CASE_ ) ) # 5.348480500048026, 2.6477354579837993, 1.8470395830227062 # 5.036091582966037, 2.644472333951853, 1.8869528750656173 if __name__ == "__main__": import doctest doctest.testmod() benchmark()
315
1
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCAmelCase_ ( _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : Optional[Any] = LDMTextToImagePipeline __UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_PARAMS - { "negative_prompt", "negative_prompt_embeds", "cross_attention_kwargs", "prompt_embeds", } __UpperCamelCase : Tuple = PipelineTesterMixin.required_optional_params - { "num_images_per_prompt", "callback", "callback_steps", } __UpperCamelCase : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS __UpperCamelCase : Optional[Any] = False def _lowercase ( self ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase : int = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) UpperCamelCase : Union[str, Any] = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , ) torch.manual_seed(0 ) UpperCamelCase : Union[str, Any] = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , latent_channels=4 , ) torch.manual_seed(0 ) UpperCamelCase : Any = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) UpperCamelCase : str = CLIPTextModel(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) UpperCamelCase : List[Any] = { '''unet''': unet, '''scheduler''': scheduler, '''vqvae''': vae, '''bert''': text_encoder, '''tokenizer''': tokenizer, } return components def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ): """simple docstring""" if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ): UpperCamelCase : List[str] = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: UpperCamelCase : List[Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCamelCase : Dict = self.get_dummy_components() UpperCamelCase : Optional[Any] = LDMTextToImagePipeline(**__SCREAMING_SNAKE_CASE ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = pipe(**__SCREAMING_SNAKE_CASE 
).images UpperCamelCase : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) UpperCamelCase : Optional[Any] = np.array([0.6_101, 0.6_156, 0.5_622, 0.4_895, 0.6_661, 0.3_804, 0.5_748, 0.6_136, 0.5_014] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' def _lowercase ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=torch.floataa , __SCREAMING_SNAKE_CASE=0 ): """simple docstring""" UpperCamelCase : Dict = torch.manual_seed(__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = np.random.RandomState(__SCREAMING_SNAKE_CASE ).standard_normal((1, 4, 32, 32) ) UpperCamelCase : str = torch.from_numpy(__SCREAMING_SNAKE_CASE ).to(device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = self.get_inputs(__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = pipe(**__SCREAMING_SNAKE_CASE ).images UpperCamelCase : Any = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) UpperCamelCase : Optional[int] = np.array([0.51_825, 0.52_850, 0.52_543, 0.54_258, 0.52_304, 0.52_569, 0.54_363, 0.55_276, 0.56_878] ) UpperCamelCase : Union[str, Any] = np.abs(expected_slice - image_slice ).max() assert max_diff < 1e-3 @nightly @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' def _lowercase ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=torch.floataa , __SCREAMING_SNAKE_CASE=0 ): """simple docstring""" UpperCamelCase : List[Any] = torch.manual_seed(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = np.random.RandomState(__SCREAMING_SNAKE_CASE ).standard_normal((1, 4, 32, 32) ) UpperCamelCase : Dict = torch.from_numpy(__SCREAMING_SNAKE_CASE ).to(device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = { '''prompt''': '''A painting of a squirrel eating a burger''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 50, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def _lowercase ( self ): """simple docstring""" UpperCamelCase : int = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = self.get_inputs(__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = pipe(**__SCREAMING_SNAKE_CASE ).images[0] UpperCamelCase : Optional[Any] = load_numpy( '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy''' ) UpperCamelCase : Optional[int] = np.abs(expected_image - image ).max() assert max_diff < 1e-3
315
import argparse
import logging
import sys
from unittest.mock import patch

import run_glue_deebert

from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            """.split()
        self.run_and_check(train_args)

        eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(entropy_eval_args)
315
1
import json
import os
import re
import sys
import urllib.request

import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Download images from a Google image search and return how many were saved."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        # The URLs come out double-escaped, so decode the escapes twice
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode("unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode("unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase : Tuple = logging.get_logger(__name__) __UpperCAmelCase : Union[str, Any] = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : List[Any] = "ibert" def __init__( self , __SCREAMING_SNAKE_CASE=30_522 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE="absolute" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="none" , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = vocab_size UpperCamelCase : Optional[int] = hidden_size UpperCamelCase : Tuple = num_hidden_layers UpperCamelCase : Optional[Any] = num_attention_heads UpperCamelCase : Dict = hidden_act UpperCamelCase : Union[str, Any] = intermediate_size UpperCamelCase : str = hidden_dropout_prob UpperCamelCase : Any = attention_probs_dropout_prob UpperCamelCase : Dict = max_position_embeddings UpperCamelCase : Union[str, Any] = type_vocab_size UpperCamelCase : Optional[Any] = initializer_range UpperCamelCase : Union[str, Any] = layer_norm_eps UpperCamelCase : Dict = position_embedding_type UpperCamelCase : int = quant_mode UpperCamelCase : Any = force_dequant class UpperCAmelCase_ ( _a): '''simple docstring''' @property def _lowercase ( self ): """simple docstring""" if self.task == "multiple-choice": UpperCamelCase : int = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: UpperCamelCase : Optional[int] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
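# Hedged usage sketch for the configuration above, which transformers publishes
# as IBertConfig (requires transformers and torch; quant_mode=True switches the
# model to its integer-only code path):
from transformers import IBertConfig, IBertModel

config = IBertConfig(quant_mode=True, force_dequant="none")
model = IBertModel(config)  # randomly initialised, no checkpoint download
print(config.num_hidden_layers)  # 12 by default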
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start: str, goal: str) -> list[str]:
    """Find the shortest path between `start` and `goal` using breadth-first
    search; returns [] if no path exists."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start: str, target: str) -> int:
    """Return the number of edges on the shortest path between `start` and
    `target`, or -1 if no path exists."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
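# Quick sanity check tying the two functions above together: the number of
# edges on the returned path must equal the reported distance (assumes both
# functions are in scope):
toy_graph = {"A": ["B"], "B": ["A", "C"], "C": ["B"]}
assert bfs_shortest_path(toy_graph, "A", "C") == ["A", "B", "C"]
assert bfs_shortest_path_distance(toy_graph, "A", "C") == 2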
import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup __UpperCAmelCase : int = logging.get_logger(__name__) class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , **__SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(self , ['''bs4'''] ) super().__init__(**__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : List[Any] = [] UpperCamelCase : int = [] UpperCamelCase : List[Any] = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag UpperCamelCase : Tuple = parent.find_all(child.name , recursive=__SCREAMING_SNAKE_CASE ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(__SCREAMING_SNAKE_CASE ) else next(i for i, s in enumerate(__SCREAMING_SNAKE_CASE , 1 ) if s is child ) ) UpperCamelCase : Optional[Any] = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[Any] = BeautifulSoup(__SCREAMING_SNAKE_CASE , '''html.parser''' ) UpperCamelCase : Union[str, Any] = [] UpperCamelCase : List[str] = [] UpperCamelCase : str = [] for element in html_code.descendants: if type(__SCREAMING_SNAKE_CASE ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue UpperCamelCase : Any = html.unescape(__SCREAMING_SNAKE_CASE ).strip() if not text_in_this_tag: continue all_doc_strings.append(__SCREAMING_SNAKE_CASE ) UpperCamelCase , UpperCamelCase : int = self.xpath_soup(__SCREAMING_SNAKE_CASE ) stringaxtag_seq.append(__SCREAMING_SNAKE_CASE ) stringaxsubs_seq.append(__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[Any] = '''''' for tagname, subs in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): xpath += f"""/{tagname}""" if subs != 0: xpath += f"""[{subs}]""" return xpath def __call__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : int = False # Check that strings has a valid type if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): UpperCamelCase : Optional[Any] = True elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ): if len(__SCREAMING_SNAKE_CASE ) == 0 or isinstance(html_strings[0] , __SCREAMING_SNAKE_CASE ): UpperCamelCase : List[str] = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' f"""but is of type {type(__SCREAMING_SNAKE_CASE )}.""" ) UpperCamelCase : int = bool(isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(html_strings[0] , __SCREAMING_SNAKE_CASE )) ) if not is_batched: UpperCamelCase : Union[str, Any] = [html_strings] # Get nodes + xpaths UpperCamelCase : str = [] UpperCamelCase : int = [] for html_string in html_strings: UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = self.get_three_from_single(__SCREAMING_SNAKE_CASE ) 
nodes.append(__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = [] for node, tag_list, sub_list in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): UpperCamelCase : str = self.construct_xpath(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) xpath_strings.append(__SCREAMING_SNAKE_CASE ) xpaths.append(__SCREAMING_SNAKE_CASE ) # return as Dict UpperCamelCase : List[str] = {'''nodes''': nodes, '''xpaths''': xpaths} UpperCamelCase : List[Any] = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE ) return encoded_inputs
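# Standalone illustration of the xpath bookkeeping implemented above (requires
# bs4; the variable names are illustrative, not part of the class API). A tag's
# subscript is its 1-based position among same-named siblings:
from bs4 import BeautifulSoup

html_code = "<html><body><div><p>first</p><p>second</p></div></body></html>"
soup = BeautifulSoup(html_code, "html.parser")
second_p = soup.find_all("p")[1]
siblings = second_p.parent.find_all("p", recursive=False)
position = siblings.index(second_p) + 1
print(f"/html/body/div/p[{position}]")  # -> /html/body/div/p[2]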
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        T5FilmDecoder,
    )

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
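# Hedged usage sketch for the exports above (assumes diffusers with the torch,
# transformers and note_seq extras installed; "beethoven.mid" is a placeholder
# path and the checkpoint is downloaded at runtime):
#
#     from diffusers import MidiProcessor, SpectrogramDiffusionPipeline
#
#     pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#     processor = MidiProcessor()
#     audio = pipe(processor("beethoven.mid")).audios[0]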
import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params __UpperCAmelCase : List[str] = getLogger(__name__) __UpperCAmelCase : Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu" def a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int = 8 , SCREAMING_SNAKE_CASE_ : str = DEFAULT_DEVICE , SCREAMING_SNAKE_CASE_ : List[Any]=False , SCREAMING_SNAKE_CASE_ : int="summarization" , SCREAMING_SNAKE_CASE_ : int=None , **SCREAMING_SNAKE_CASE_ : Any , ): """simple docstring""" UpperCamelCase : Dict = Path(SCREAMING_SNAKE_CASE_ ).open('''w''' , encoding='''utf-8''' ) UpperCamelCase : List[Any] = str(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) if fpaa: UpperCamelCase : List[Any] = model.half() UpperCamelCase : str = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type. UpperCamelCase : int = time.time() # update config with task specific params use_task_specific_params(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if prefix is None: UpperCamelCase : Union[str, Any] = prefix or getattr(model.config , '''prefix''' , '''''' ) or '''''' for examples_chunk in tqdm(list(chunks(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ): UpperCamelCase : Optional[int] = [prefix + text for text in examples_chunk] UpperCamelCase : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , truncation=SCREAMING_SNAKE_CASE_ , padding='''longest''' ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **SCREAMING_SNAKE_CASE_ , ) UpperCamelCase : List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) for hypothesis in dec: fout.write(hypothesis + '''\n''' ) fout.flush() fout.close() UpperCamelCase : str = int(time.time() - start_time ) # seconds UpperCamelCase : Any = len(SCREAMING_SNAKE_CASE_ ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def a ( ): """simple docstring""" return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' ) def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any]=True ): """simple docstring""" UpperCamelCase : int = argparse.ArgumentParser() parser.add_argument('''model_name''' , type=SCREAMING_SNAKE_CASE_ , help='''like facebook/bart-large-cnn,t5-base, etc.''' ) parser.add_argument('''input_path''' , type=SCREAMING_SNAKE_CASE_ , help='''like cnn_dm/test.source''' ) parser.add_argument('''save_path''' , type=SCREAMING_SNAKE_CASE_ , help='''where to save summaries''' ) parser.add_argument('''--reference_path''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='''like cnn_dm/test.target''' ) parser.add_argument('''--score_path''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , default='''metrics.json''' , help='''where to save metrics''' ) parser.add_argument('''--device''' , type=SCREAMING_SNAKE_CASE_ , 
required=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help='''cuda, cuda:1, cpu etc.''' ) parser.add_argument( '''--prefix''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help='''will be added to the begininng of src examples''' ) parser.add_argument('''--task''' , type=SCREAMING_SNAKE_CASE_ , default='''summarization''' , help='''used for task_specific_params + metrics''' ) parser.add_argument('''--bs''' , type=SCREAMING_SNAKE_CASE_ , default=8 , required=SCREAMING_SNAKE_CASE_ , help='''batch size''' ) parser.add_argument( '''--n_obs''' , type=SCREAMING_SNAKE_CASE_ , default=-1 , required=SCREAMING_SNAKE_CASE_ , help='''How many observations. Defaults to all.''' ) parser.add_argument('''--fp16''' , action='''store_true''' ) parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' ) parser.add_argument( '''--info''' , nargs='''?''' , type=SCREAMING_SNAKE_CASE_ , const=datetime_now() , help=( '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.''' ''' lang=en-ru. If no value is passed, the current datetime string will be used.''' ) , ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate UpperCamelCase , UpperCamelCase : int = parser.parse_known_args() UpperCamelCase : str = parse_numeric_n_bool_cl_kwargs(SCREAMING_SNAKE_CASE_ ) if parsed_args and verbose: print(F"""parsed the following generate kwargs: {parsed_args}""" ) UpperCamelCase : str = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: UpperCamelCase : Tuple = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(F"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError('''Can\'t mix --fp16 and --device cpu''' ) UpperCamelCase : str = generate_summaries_or_translations( SCREAMING_SNAKE_CASE_ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **SCREAMING_SNAKE_CASE_ , ) if args.reference_path is None: return {} # Compute scores UpperCamelCase : Tuple = calculate_bleu if '''translation''' in args.task else calculate_rouge UpperCamelCase : Dict = [x.rstrip() for x in open(args.save_path ).readlines()] UpperCamelCase : Optional[int] = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(SCREAMING_SNAKE_CASE_ )] UpperCamelCase : dict = score_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) scores.update(SCREAMING_SNAKE_CASE_ ) if args.dump_args: scores.update(SCREAMING_SNAKE_CASE_ ) if args.info: UpperCamelCase : Optional[Any] = args.info if verbose: print(SCREAMING_SNAKE_CASE_ ) if args.score_path is not None: json.dump(SCREAMING_SNAKE_CASE_ , open(args.score_path , '''w''' ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import TensorType, logging if TYPE_CHECKING: from ...onnx.config import PatchingSpec from ...tokenization_utils_base import PreTrainedTokenizerBase __UpperCAmelCase : Dict = logging.get_logger(__name__) __UpperCAmelCase : int = { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json", "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json", "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json" ), } class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : Union[str, Any] = "longformer" def __init__( self , __SCREAMING_SNAKE_CASE = 512 , __SCREAMING_SNAKE_CASE = 2 , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = 2 , __SCREAMING_SNAKE_CASE = 30_522 , __SCREAMING_SNAKE_CASE = 768 , __SCREAMING_SNAKE_CASE = 12 , __SCREAMING_SNAKE_CASE = 12 , __SCREAMING_SNAKE_CASE = 3_072 , __SCREAMING_SNAKE_CASE = "gelu" , __SCREAMING_SNAKE_CASE = 0.1 , __SCREAMING_SNAKE_CASE = 0.1 , __SCREAMING_SNAKE_CASE = 512 , __SCREAMING_SNAKE_CASE = 2 , __SCREAMING_SNAKE_CASE = 0.02 , __SCREAMING_SNAKE_CASE = 1e-12 , __SCREAMING_SNAKE_CASE = False , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = attention_window UpperCamelCase : Optional[int] = sep_token_id UpperCamelCase : str = bos_token_id UpperCamelCase : Tuple = eos_token_id UpperCamelCase : int = vocab_size UpperCamelCase : Optional[Any] = hidden_size UpperCamelCase : Any = num_hidden_layers UpperCamelCase : Any = num_attention_heads UpperCamelCase : List[str] = hidden_act UpperCamelCase : Any = intermediate_size UpperCamelCase : Optional[Any] = hidden_dropout_prob UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob UpperCamelCase : List[Any] = max_position_embeddings UpperCamelCase : Optional[int] = type_vocab_size UpperCamelCase : Union[str, Any] = initializer_range UpperCamelCase : List[str] = layer_norm_eps UpperCamelCase : Union[str, Any] = onnx_export class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "default" , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = True @property def _lowercase ( self ): """simple docstring""" if self.task == "multiple-choice": UpperCamelCase : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: UpperCamelCase : Optional[int] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''global_attention_mask''', dynamic_axis), ] ) @property def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = super().outputs if self.task == "default": 
UpperCamelCase : Any = {0: '''batch'''} return outputs @property def _lowercase ( self ): """simple docstring""" return 1e-4 @property def _lowercase ( self ): """simple docstring""" return max(super().default_onnx_opset , 14 ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , ): """simple docstring""" UpperCamelCase : str = super().generate_dummy_inputs( preprocessor=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE ) import torch # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64) # makes the export fail randomly UpperCamelCase : Optional[int] = torch.zeros_like(inputs['''input_ids'''] ) # make every second token global UpperCamelCase : str = 1 return inputs
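# Self-contained sketch of the global-attention convention the dummy inputs
# above rely on (assumes torch): Longformer expects a 0/1 mask where 1 marks
# tokens that attend globally; here every second token is made global,
# mirroring the generate_dummy_inputs override:
import torch

input_ids = torch.randint(0, 30_522, (1, 8))
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, ::2] = 1  # 1 = global attention, 0 = local sliding window
print(global_attention_mask)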
import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : int = ["image_processor", "tokenizer"] __UpperCamelCase : List[str] = "AutoImageProcessor" __UpperCamelCase : Optional[Any] = "AutoTokenizer" def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[int] = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __SCREAMING_SNAKE_CASE , ) UpperCamelCase : Any = kwargs.pop('''feature_extractor''' ) UpperCamelCase : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = self.image_processor UpperCamelCase : int = False def __call__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = kwargs.pop('''images''' , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = kwargs.pop('''text''' , __SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) > 0: UpperCamelCase : Union[str, Any] = args[0] UpperCamelCase : str = args[1:] if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: UpperCamelCase : List[str] = self.image_processor(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if text is not None: UpperCamelCase : Optional[Any] = self.tokenizer(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if text is None: return inputs elif images is None: return encodings else: UpperCamelCase : List[str] = encodings['''input_ids'''] return inputs def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): """simple docstring""" return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): """simple docstring""" return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) @contextmanager def _lowercase ( self ): """simple docstring""" warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your images inputs, or in a separate call.''' ) UpperCamelCase : Any = True UpperCamelCase : int = self.tokenizer yield UpperCamelCase : List[Any] = self.image_processor UpperCamelCase : Tuple = False def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None ): """simple docstring""" if added_vocab is None: UpperCamelCase : str = self.tokenizer.get_added_vocab() UpperCamelCase : int = {} while tokens: UpperCamelCase : Dict = re.search(R'''<s_(.*?)>''' , __SCREAMING_SNAKE_CASE , re.IGNORECASE ) if start_token is None: break UpperCamelCase : List[str] = start_token.group(1 ) UpperCamelCase : Dict = re.search(Rf"""</s_{key}>""" , __SCREAMING_SNAKE_CASE , re.IGNORECASE ) UpperCamelCase : Any = start_token.group() if end_token is None: UpperCamelCase : Optional[int] = tokens.replace(__SCREAMING_SNAKE_CASE , '''''' ) else: UpperCamelCase : Dict = end_token.group() UpperCamelCase : int = re.escape(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = re.escape(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , __SCREAMING_SNAKE_CASE , re.IGNORECASE ) if content is not None: UpperCamelCase : Dict = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node UpperCamelCase : Tuple = self.tokenajson(__SCREAMING_SNAKE_CASE , is_inner_value=__SCREAMING_SNAKE_CASE , added_vocab=__SCREAMING_SNAKE_CASE ) if value: if len(__SCREAMING_SNAKE_CASE ) == 1: UpperCamelCase : str = value[0] UpperCamelCase : str = value else: # leaf nodes UpperCamelCase : Optional[int] = [] for leaf in content.split(R'''<sep/>''' ): UpperCamelCase : Optional[int] = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": UpperCamelCase : int = leaf[1:-2] # for categorical special tokens output[key].append(__SCREAMING_SNAKE_CASE ) if len(output[key] ) == 1: UpperCamelCase : Tuple = output[key][0] UpperCamelCase : List[Any] = tokens[tokens.find(__SCREAMING_SNAKE_CASE ) + len(__SCREAMING_SNAKE_CASE ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=__SCREAMING_SNAKE_CASE , added_vocab=__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def _lowercase ( self ): """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __SCREAMING_SNAKE_CASE , ) return self.image_processor_class @property def _lowercase ( self ): """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __SCREAMING_SNAKE_CASE , ) return self.image_processor
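# Illustrative sketch of what the tag parser above (tokenajson, i.e. token2json
# before name-mangling) produces for a Donut-style tag sequence; the exact
# output depends on the tokenizer's added vocabulary:
#
#     tokens = "<s_menu><s_name>Latte</s_name><s_price>4.50</s_price></s_menu>"
#     processor.tokenajson(tokens)
#     # -> {"menu": {"name": "Latte", "price": "4.50"}}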
class Graph:
    def __init__(self) -> None:
        # adjacency list: vertex -> list of adjacent vertices
        self.vertex: dict[int, list[int]] = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if the source vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:  # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
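# For deep graphs the recursive walk above can exceed Python's recursion limit;
# an equivalent iterative traversal with an explicit stack is a common
# alternative (a sketch operating on the same adjacency-dict shape):
def dfs_iterative(vertex: dict, start: int) -> list:
    visited, order, stack = set(), [], [start]
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        order.append(node)
        # push neighbours in reverse so they are expanded in insertion order
        stack.extend(reversed(vertex.get(node, [])))
    return order


assert dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0) == [0, 1, 2, 3]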
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase : Union[str, Any] = { "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"], "processing_mgp_str": ["MgpstrProcessor"], "tokenization_mgp_str": ["MgpstrTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : Union[str, Any] = [ "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST", "MgpstrModel", "MgpstrPreTrainedModel", "MgpstrForSceneTextRecognition", ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys __UpperCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
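# The _LazyModule used above defers importing heavy submodules until one of
# their symbols is first accessed. A minimal self-contained sketch of the idea:
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol: str):
        # resolve the submodule lazily, on first attribute access
        module = importlib.import_module(self._symbol_to_module[symbol])
        return getattr(module, symbol)


demo = LazyModule("demo", {"json": ["dumps"]})
print(demo.dumps({"a": 1}))  # the real json.dumps is resolved only here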
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: __UpperCAmelCase : Union[str, Any] = None __UpperCAmelCase : Tuple = logging.get_logger(__name__) __UpperCAmelCase : int = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"} __UpperCAmelCase : Any = { "vocab_file": { "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model", }, "tokenizer_file": { "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json", }, } __UpperCAmelCase : Tuple = { "google/rembert": 256, } __UpperCAmelCase : Union[str, Any] = "▁" class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : str = VOCAB_FILES_NAMES __UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Tuple = RemBertTokenizer def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" UpperCamelCase : int = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token super().__init__( __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) UpperCamelCase : List[str] = do_lower_case UpperCamelCase : Tuple = remove_space UpperCamelCase : Tuple = keep_accents UpperCamelCase : Optional[Any] = vocab_file UpperCamelCase : List[Any] = False if not self.vocab_file else True def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" UpperCamelCase : List[str] = [self.sep_token_id] UpperCamelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" 
UpperCamelCase : List[str] = [self.sep_token_id] UpperCamelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error('''Vocabulary path ({}) should be a directory'''.format(__SCREAMING_SNAKE_CASE ) ) return UpperCamelCase : Optional[int] = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
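# Layout produced by the special-token helpers above for a sequence pair
# (A, B); a pure illustration, nothing to run:
#
#     tokens:              [CLS]  a1  a2  [SEP]  b1  [SEP]
#     token_type_ids:        0     0   0    0     1    1
#     special_tokens_mask:   1     0   0    1     0    1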
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be filled with blocks that
    are at least 3 units long and separated by at least one empty unit
    (cf. Project Euler Problem 114)."""
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
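# Slow brute-force cross-check of the DP above for small lengths: each position
# either stays empty or starts a block of length >= 3 followed by one separator
# cell (unless the block reaches the end of the row):
from functools import lru_cache


@lru_cache(maxsize=None)
def brute_force(n: int) -> int:
    if n <= 0:
        return 1  # an empty suffix can be filled in exactly one way
    total = brute_force(n - 1)  # leave the first cell empty
    for block in range(3, n + 1):
        total += brute_force(n - block - 1) if block < n else 1
    return total


assert all(brute_force(n) == solution(n) for n in range(3, 20))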
import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int __UpperCAmelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class UpperCAmelCase_ ( datasets.BuilderConfig): '''simple docstring''' __UpperCamelCase : Optional[datasets.Features] = None def a ( SCREAMING_SNAKE_CASE_ : "pyspark.sql.DataFrame" , SCREAMING_SNAKE_CASE_ : List[int] , ): """simple docstring""" import pyspark def generate_fn(): UpperCamelCase : Union[str, Any] = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) ) for partition_id in partition_order: UpperCamelCase : str = df_with_partition_id.select('''*''' ).where(F"""part_id = {partition_id}""" ).drop('''part_id''' ) UpperCamelCase : Any = partition_df.collect() UpperCamelCase : List[Any] = 0 for row in rows: yield F"""{partition_id}_{row_id}""", row.asDict() row_id += 1 return generate_fn class UpperCAmelCase_ ( _BaseExamplesIterable): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , ): """simple docstring""" UpperCamelCase : Dict = df UpperCamelCase : int = partition_order or range(self.df.rdd.getNumPartitions() ) UpperCamelCase : Any = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self ): """simple docstring""" yield from self.generate_examples_fn() def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : int = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(__SCREAMING_SNAKE_CASE ) return SparkExamplesIterable(self.df , partition_order=__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Union[str, Any] = self.split_shard_indices_by_worker(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return SparkExamplesIterable(self.df , partition_order=__SCREAMING_SNAKE_CASE ) @property def _lowercase ( self ): """simple docstring""" return len(self.partition_order ) class UpperCAmelCase_ ( datasets.DatasetBuilder): '''simple docstring''' __UpperCamelCase : str = SparkConfig def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" import pyspark UpperCamelCase : List[str] = pyspark.sql.SparkSession.builder.getOrCreate() UpperCamelCase : Tuple = df UpperCamelCase : str = working_dir super().__init__( cache_dir=__SCREAMING_SNAKE_CASE , config_name=str(self.df.semanticHash() ) , **__SCREAMING_SNAKE_CASE , ) def _lowercase ( self ): """simple docstring""" def create_cache_and_write_probe(__SCREAMING_SNAKE_CASE ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. 
open(__SCREAMING_SNAKE_CASE , '''a''' ) return [probe_file] if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: UpperCamelCase : List[str] = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(__SCREAMING_SNAKE_CASE ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( '''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' ) def _lowercase ( self ): """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" import pyspark def get_arrow_batch_size(__SCREAMING_SNAKE_CASE ): for batch in it: yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} ) UpperCamelCase : Tuple = self.df.count() UpperCamelCase : Optional[int] = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. UpperCamelCase : Optional[Any] = ( self.df.limit(__SCREAMING_SNAKE_CASE ) .repartition(1 ) .mapInArrow(__SCREAMING_SNAKE_CASE , '''batch_bytes: long''' ) .agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) ) .collect()[0] .sample_bytes / sample_num_rows ) UpperCamelCase : Any = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. UpperCamelCase : Union[str, Any] = min(__SCREAMING_SNAKE_CASE , int(approx_total_size / max_shard_size ) ) UpperCamelCase : Any = self.df.repartition(__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ): """simple docstring""" import pyspark UpperCamelCase : List[Any] = ParquetWriter if file_format == '''parquet''' else ArrowWriter UpperCamelCase : List[str] = os.path.join(self._working_dir , os.path.basename(__SCREAMING_SNAKE_CASE ) ) if self._working_dir else fpath UpperCamelCase : List[Any] = file_format == '''parquet''' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. UpperCamelCase : Dict = self.config.features UpperCamelCase : List[Any] = self._writer_batch_size UpperCamelCase : Optional[int] = self._fs.storage_options def write_arrow(__SCREAMING_SNAKE_CASE ): # Within the same SparkContext, no two task attempts will share the same attempt ID. UpperCamelCase : Optional[Any] = pyspark.TaskContext().taskAttemptId() UpperCamelCase : Union[str, Any] = next(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) UpperCamelCase : List[Any] = 0 UpperCamelCase : Optional[Any] = writer_class( features=__SCREAMING_SNAKE_CASE , path=working_fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , writer_batch_size=__SCREAMING_SNAKE_CASE , storage_options=__SCREAMING_SNAKE_CASE , embed_local_files=__SCREAMING_SNAKE_CASE , ) UpperCamelCase : str = pa.Table.from_batches([first_batch] ) writer.write_table(__SCREAMING_SNAKE_CASE ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: UpperCamelCase , UpperCamelCase : Dict = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) shard_id += 1 UpperCamelCase : int = writer_class( features=writer._features , path=working_fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , writer_batch_size=__SCREAMING_SNAKE_CASE , storage_options=__SCREAMING_SNAKE_CASE , embed_local_files=__SCREAMING_SNAKE_CASE , ) UpperCamelCase : Optional[int] = pa.Table.from_batches([batch] ) writer.write_table(__SCREAMING_SNAKE_CASE ) if writer._num_bytes > 0: UpperCamelCase , UpperCamelCase : List[Any] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(__SCREAMING_SNAKE_CASE ) ): UpperCamelCase : List[str] = os.path.join(os.path.dirname(__SCREAMING_SNAKE_CASE ) , os.path.basename(__SCREAMING_SNAKE_CASE ) ) shutil.move(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : int = ( self.df.mapInArrow(__SCREAMING_SNAKE_CASE , '''task_id: long, num_examples: long, num_bytes: long''' ) .groupBy('''task_id''' ) .agg( pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "arrow" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" self._validate_cache_dir() UpperCamelCase : str = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = not is_remote_filesystem(self._fs ) UpperCamelCase : Optional[Any] = os.path.join if is_local else posixpath.join UpperCamelCase : str = '''-TTTTT-SSSSS-of-NNNNN''' UpperCamelCase : Dict = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}""" UpperCamelCase : Optional[int] = path_join(self._output_dir , __SCREAMING_SNAKE_CASE ) UpperCamelCase : int = 0 UpperCamelCase : int = 0 UpperCamelCase : List[Any] = 0 UpperCamelCase : List[str] = [] UpperCamelCase : Optional[int] = [] for task_id, content in self._prepare_split_single(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , 
) : Optional[Any] = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = total_num_examples UpperCamelCase : str = total_num_bytes # should rename everything at the end logger.debug(f"""Renaming {total_shards} shards.""" ) if total_shards > 1: UpperCamelCase : Union[str, Any] = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. UpperCamelCase : Any = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ): rename( __SCREAMING_SNAKE_CASE , fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , fpath.replace('''TTTTT-SSSSS''' , f"""{global_shard_id:05d}""" ).replace('''NNNNN''' , f"""{total_shards:05d}""" ) , ) UpperCamelCase : Optional[int] = [] UpperCamelCase : str = 0 for i in range(len(__SCREAMING_SNAKE_CASE ) ): UpperCamelCase , UpperCamelCase : Optional[int] = task_id_and_num_shards[i] for shard_id in range(__SCREAMING_SNAKE_CASE ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) ).map(lambda __SCREAMING_SNAKE_CASE : _rename_shard(*__SCREAMING_SNAKE_CASE ) ).collect() else: # don't use any pattern UpperCamelCase : Optional[int] = 0 UpperCamelCase : Dict = task_id_and_num_shards[0][0] self._rename( fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , fpath.replace(__SCREAMING_SNAKE_CASE , '''''' ) , ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , ): """simple docstring""" return SparkExamplesIterable(self.df )
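# Hedged end-to-end sketch of the builder above as reached through the public
# API (assumes pyspark and a local Spark session; Dataset.from_spark drives
# this builder under the hood):
#
#     from datasets import Dataset
#     from pyspark.sql import SparkSession
#
#     spark = SparkSession.builder.master("local[*]").getOrCreate()
#     df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#     ds = Dataset.from_spark(df)
#     print(ds[0])  # {'text': 'hello'}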
import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , *__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ): """simple docstring""" super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = eval_examples UpperCamelCase : Optional[Any] = post_process_function def _lowercase ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "eval" ): """simple docstring""" UpperCamelCase : int = self.eval_dataset if eval_dataset is None else eval_dataset UpperCamelCase : int = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. UpperCamelCase : Any = self.compute_metrics UpperCamelCase : List[Any] = None UpperCamelCase : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop UpperCamelCase : Dict = time.time() try: UpperCamelCase : str = eval_loop( __SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , ) finally: UpperCamelCase : Union[str, Any] = compute_metrics UpperCamelCase : Any = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default UpperCamelCase : Dict = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions ) UpperCamelCase : Optional[Any] = self.compute_metrics(__SCREAMING_SNAKE_CASE ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): UpperCamelCase : Dict = metrics.pop(__SCREAMING_SNAKE_CASE ) metrics.update(output.metrics ) else: UpperCamelCase : List[Any] = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(__SCREAMING_SNAKE_CASE ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) UpperCamelCase : Any = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE ) return metrics def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "test" ): """simple docstring""" UpperCamelCase : Tuple = self.get_test_dataloader(__SCREAMING_SNAKE_CASE ) # Temporarily disable metric computation, we will do it in the loop here. 
UpperCamelCase : Union[str, Any] = self.compute_metrics UpperCamelCase : Tuple = None UpperCamelCase : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop UpperCamelCase : Optional[int] = time.time() try: UpperCamelCase : int = eval_loop( __SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , ) finally: UpperCamelCase : int = compute_metrics UpperCamelCase : Dict = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output UpperCamelCase : Dict = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions , '''predict''' ) UpperCamelCase : Union[str, Any] = self.compute_metrics(__SCREAMING_SNAKE_CASE ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): UpperCamelCase : Any = metrics.pop(__SCREAMING_SNAKE_CASE ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE )
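# Hedged sketch of how this trainer subclass (published as
# QuestionAnsweringTrainer in the examples) is typically wired up; the
# post-processing function and datasets are defined elsewhere in those
# example scripts, not in this file:
#
#     trainer = QuestionAnsweringTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,  # raw examples kept for post-processing
#         post_process_function=post_processing_function,
#         compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate()  # eval loop + post-processing + metrics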
import inspect import unittest import numpy as np from transformers import BeitConfig from transformers.testing_utils import require_flax, require_vision, slow from transformers.utils import cached_property, is_flax_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor if is_flax_available(): import jax from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , ): """simple docstring""" UpperCamelCase : Union[str, Any] = parent UpperCamelCase : Optional[int] = vocab_size UpperCamelCase : int = batch_size UpperCamelCase : str = image_size UpperCamelCase : Dict = patch_size UpperCamelCase : Optional[Any] = num_channels UpperCamelCase : str = is_training UpperCamelCase : List[Any] = use_labels UpperCamelCase : Optional[int] = hidden_size UpperCamelCase : int = num_hidden_layers UpperCamelCase : Union[str, Any] = num_attention_heads UpperCamelCase : List[str] = intermediate_size UpperCamelCase : Any = hidden_act UpperCamelCase : Dict = hidden_dropout_prob UpperCamelCase : Optional[int] = attention_probs_dropout_prob UpperCamelCase : int = type_sequence_label_size UpperCamelCase : Union[str, Any] = initializer_range # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCamelCase : Tuple = (image_size // patch_size) ** 2 UpperCamelCase : List[Any] = num_patches + 1 def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase : Optional[int] = None if self.use_labels: UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase : Optional[Any] = BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, pixel_values, labels def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Union[str, Any] = FlaxBeitModel(config=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): 
"""simple docstring""" UpperCamelCase : Any = FlaxBeitForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Tuple = self.type_sequence_label_size UpperCamelCase : List[str] = FlaxBeitForImageClassification(config=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCamelCase : Optional[int] = 1 UpperCamelCase : Any = FlaxBeitForImageClassification(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase : Optional[Any] = model(__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : Optional[Any] = config_and_inputs UpperCamelCase : Any = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class UpperCAmelCase_ ( _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : List[str] = ( (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else () ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = FlaxBeitModelTester(self ) UpperCamelCase : Union[str, Any] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def _lowercase ( self ): """simple docstring""" self.config_tester.run_common_tests() def _lowercase ( self ): """simple docstring""" UpperCamelCase , UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Dict = model_class(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase : Tuple = [*signature.parameters.keys()] UpperCamelCase : List[str] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCamelCase : Any = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = model_class(__SCREAMING_SNAKE_CASE ) @jax.jit def model_jitted(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): return model(pixel_values=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) with self.subTest('''JIT Enabled''' ): UpperCamelCase : Optional[int] = model_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): UpperCamelCase : Tuple = model_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) ) for jitted_output, output in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): self.assertEqual(jitted_output.shape , output.shape ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE ) @slow def _lowercase ( self ): """simple docstring""" for model_class_name in self.all_model_classes: UpperCamelCase : int = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''' ) UpperCamelCase : Dict = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def a ( ): """simple docstring""" UpperCamelCase : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @require_flax class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' @cached_property def _lowercase ( self ): """simple docstring""" return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ) UpperCamelCase : Union[str, Any] = self.default_image_processor UpperCamelCase : Optional[int] = prepare_img() UpperCamelCase : int = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''np''' ).pixel_values # prepare bool_masked_pos UpperCamelCase : Any = np.ones((1, 196) , dtype=__SCREAMING_SNAKE_CASE ) # forward pass UpperCamelCase : List[Any] = model(pixel_values=__SCREAMING_SNAKE_CASE , bool_masked_pos=__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = outputs.logits # verify the logits UpperCamelCase : Optional[int] = (1, 196, 8_192) self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = np.array( [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ) self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-2 ) ) @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ) UpperCamelCase : Any = self.default_image_processor UpperCamelCase : List[Any] = prepare_img() UpperCamelCase : List[str] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''np''' ) # forward pass UpperCamelCase : Union[str, Any] = model(**__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = outputs.logits # verify the logits UpperCamelCase : Any = (1, 1_000) self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = np.array([-1.2_385, -1.0_987, -1.0_108] ) self.assertTrue(np.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) ) UpperCamelCase : Tuple = 281 self.assertEqual(logits.argmax(-1 ).item() , __SCREAMING_SNAKE_CASE ) @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[Any] = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ) UpperCamelCase : Union[str, Any] = self.default_image_processor UpperCamelCase : Dict = prepare_img() UpperCamelCase : List[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''np''' ) # forward pass UpperCamelCase : List[str] = 
model(**__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = outputs.logits # verify the logits UpperCamelCase : str = (1, 21_841) self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = np.array([1.6_881, -0.2_787, 0.5_901] ) self.assertTrue(np.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) ) UpperCamelCase : List[str] = 2_396 self.assertEqual(logits.argmax(-1 ).item() , __SCREAMING_SNAKE_CASE )
315
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return a sorted-letter signature; identical for all anagrams of a word."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word in the word list sharing ``my_word``'s signature."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
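# A small self-contained demo (the word list below is an assumed example, not
# part of the original script): the same signature table can be built from any
# in-memory iterable of words.
if __name__ == "__main__":
    demo_words = ["listen", "silent", "enlist", "google"]
    demo_table = collections.defaultdict(list)
    for demo_word in demo_words:
        demo_table[signature(demo_word)].append(demo_word)
    print(demo_table[signature("listen")])  # ['listen', 'silent', 'enlist']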
315
1
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
315
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Convert rows of values into one list per column."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalise each column; weight 0 inverts the score."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores into one score per row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Score each row of ``source_data`` and append its total score in place."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
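# A short usage sketch (the sample rows and weights below are assumptions for
# illustration): weight 1 rewards higher values, weight 0 rewards lower ones.
if __name__ == "__main__":
    # columns: price (lower is better), mileage (higher is better), year (higher is better)
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    print(procentual_proximity(vehicles, [0, 1, 1]))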
315
1
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCAmelCase : Tuple = logging.get_logger(__name__) __UpperCAmelCase : Optional[Any] = "▁" __UpperCAmelCase : str = { "vocab_file": "vocab.json", "spm_file": "sentencepiece.bpe.model", } __UpperCAmelCase : str = { "vocab_file": { "facebook/s2t-small-librispeech-asr": ( "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json" ), }, "spm_file": { "facebook/s2t-small-librispeech-asr": ( "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model" ) }, } __UpperCAmelCase : Dict = { "facebook/s2t-small-librispeech-asr": 1024, } __UpperCAmelCase : Union[str, Any] = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"] __UpperCAmelCase : Any = {"mustc": MUSTC_LANGS} class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : int = VOCAB_FILES_NAMES __UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : Union[str, Any] = MAX_MODEL_INPUT_SIZES __UpperCamelCase : Any = ["input_ids", "attention_mask"] __UpperCamelCase : List[int] = [] def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" UpperCamelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , do_upper_case=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , lang_codes=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) UpperCamelCase : List[Any] = do_upper_case UpperCamelCase : Any = do_lower_case UpperCamelCase : Tuple = load_json(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = {v: k for k, v in self.encoder.items()} UpperCamelCase : Dict = spm_file UpperCamelCase : Optional[int] = load_spm(__SCREAMING_SNAKE_CASE , self.sp_model_kwargs ) if lang_codes is not None: UpperCamelCase : Union[str, Any] = lang_codes UpperCamelCase : Dict = LANGUAGES[lang_codes] UpperCamelCase : Optional[Any] = [f"""<lang:{lang}>""" for lang in self.langs] UpperCamelCase : str = {lang: self.sp_model.PieceToId(f"""<lang:{lang}>""" ) for lang in self.langs} UpperCamelCase : str = self.lang_tokens UpperCamelCase : Dict = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: UpperCamelCase : Optional[Any] = {} @property def _lowercase ( self ): """simple docstring""" return len(self.encoder ) @property def _lowercase ( self ): """simple docstring""" return self._tgt_lang @tgt_lang.setter def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[int] = new_tgt_lang self.set_tgt_lang_special_tokens(__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : List[str] = self.lang_code_to_id[tgt_lang] UpperCamelCase : Dict = [lang_code_id] def _lowercase ( self , __SCREAMING_SNAKE_CASE 
): """simple docstring""" return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" return self.encoder.get(__SCREAMING_SNAKE_CASE , self.encoder[self.unk_token] ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" return self.decoder.get(__SCREAMING_SNAKE_CASE , self.unk_token ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : int = [] UpperCamelCase : int = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: UpperCamelCase : int = self.sp_model.decode(__SCREAMING_SNAKE_CASE ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " UpperCamelCase : Union[str, Any] = [] else: current_sub_tokens.append(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = self.sp_model.decode(__SCREAMING_SNAKE_CASE ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = [1] * len(self.prefix_tokens ) UpperCamelCase : Tuple = [1] if token_ids_a is None: return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" UpperCamelCase : List[str] = self.__dict__.copy() UpperCamelCase : Optional[int] = None return state def __setstate__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Tuple = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): UpperCamelCase : Optional[int] = {} UpperCamelCase : Optional[int] = load_spm(self.spm_file , self.sp_model_kwargs ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" UpperCamelCase : Tuple = Path(__SCREAMING_SNAKE_CASE ) assert save_dir.is_dir(), f"""{save_directory} should be a directory""" UpperCamelCase : Any = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file'''] ) UpperCamelCase : Tuple = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file'''] ) save_json(self.encoder , __SCREAMING_SNAKE_CASE ) if os.path.abspath(self.spm_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.spm_file ): with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi: UpperCamelCase : List[Any] = self.sp_model.serialized_model_proto() 
fi.write(__SCREAMING_SNAKE_CASE ) return (str(__SCREAMING_SNAKE_CASE ), str(__SCREAMING_SNAKE_CASE )) def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict[str, Any] ): """simple docstring""" UpperCamelCase : Optional[Any] = sentencepiece.SentencePieceProcessor(**SCREAMING_SNAKE_CASE_ ) spm.Load(str(SCREAMING_SNAKE_CASE_ ) ) return spm def a ( SCREAMING_SNAKE_CASE_ : str ): """simple docstring""" with open(SCREAMING_SNAKE_CASE_ , '''r''' ) as f: return json.load(SCREAMING_SNAKE_CASE_ ) def a ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ): """simple docstring""" with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as f: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , indent=2 )
315
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

# Parameters
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """Read images and annotations, flip them, and save the results."""
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str):
    """Collect image paths and YOLO-style boxes from a label directory."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1):
    """Flip each image and mirror its box centers along the flipped axis."""
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """Generate a random lowercase-alphanumeric string of the given length."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
315
1
from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging __UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) __UpperCAmelCase : int = { "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json", # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : List[str] = "perceiver" def __init__( self , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=1_280 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=26 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="kv" , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=262 , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=56 , __SCREAMING_SNAKE_CASE=[368, 496] , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=1_920 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=[1, 16, 224, 224] , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = num_latents UpperCamelCase : int = d_latents UpperCamelCase : Tuple = d_model UpperCamelCase : Dict = num_blocks UpperCamelCase : Optional[int] = num_self_attends_per_block UpperCamelCase : Any = num_self_attention_heads UpperCamelCase : Dict = num_cross_attention_heads UpperCamelCase : List[Any] = qk_channels UpperCamelCase : Optional[Any] = v_channels UpperCamelCase : Any = cross_attention_shape_for_attention UpperCamelCase : List[str] = self_attention_widening_factor UpperCamelCase : Union[str, Any] = cross_attention_widening_factor UpperCamelCase : Optional[Any] = hidden_act UpperCamelCase : Tuple = attention_probs_dropout_prob UpperCamelCase : Any = initializer_range UpperCamelCase : List[Any] = layer_norm_eps UpperCamelCase : Union[str, Any] = use_query_residual # masked language modeling attributes UpperCamelCase : List[Any] = vocab_size UpperCamelCase : Optional[int] = max_position_embeddings # image classification attributes UpperCamelCase : Optional[int] = image_size # flow attributes UpperCamelCase : List[Any] = train_size # multimodal autoencoding attributes UpperCamelCase : Optional[int] = num_frames UpperCamelCase : int = audio_samples_per_frame UpperCamelCase : List[str] = samples_per_patch UpperCamelCase : Any = output_shape class UpperCAmelCase_ ( _a): '''simple docstring''' @property def _lowercase ( self ): """simple docstring""" if self.task == "multiple-choice": UpperCamelCase : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: UpperCamelCase : List[Any] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''inputs''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] ) @property def _lowercase ( self ): """simple docstring""" return 1e-4 def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = False 
, __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 3 , __SCREAMING_SNAKE_CASE = 40 , __SCREAMING_SNAKE_CASE = 40 , ): """simple docstring""" if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX UpperCamelCase : Tuple = compute_effective_axis_dimension( __SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCamelCase : Optional[Any] = preprocessor.num_special_tokens_to_add(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = compute_effective_axis_dimension( __SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__SCREAMING_SNAKE_CASE ) # Generate dummy inputs according to compute batch and sequence UpperCamelCase : Dict = [''' '''.join(['''a'''] ) * seq_length] * batch_size UpperCamelCase : int = dict(preprocessor(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE ) ) UpperCamelCase : Any = inputs.pop('''input_ids''' ) return inputs elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX UpperCamelCase : str = compute_effective_axis_dimension(__SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch ) UpperCamelCase : Tuple = self._generate_dummy_images(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : str = dict(preprocessor(images=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE ) ) UpperCamelCase : Optional[int] = inputs.pop('''pixel_values''' ) return inputs else: raise ValueError( '''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
315
import qiskit


def half_adder(bit0: int, bit1: int):
    qasm_simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, qasm_simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
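# Expected behaviour as a truth table (a sketch, assuming a qiskit release that
# still ships ``qiskit.Aer`` and ``qiskit.execute``; both were removed in
# qiskit 1.0). Counts strings read classical bits as "<AND><XOR>":
#   half_adder(0, 0) -> {'00': 1000}
#   half_adder(0, 1) -> {'01': 1000}
#   half_adder(1, 0) -> {'01': 1000}
#   half_adder(1, 1) -> {'10': 1000}   # sum bit 0, carry bit 1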
315
1
import math


def is_prime(number: int) -> bool:
    """Check whether ``number`` is prime by trial division up to its square root."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
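# Quick usage sketch: next_prime returns the first prime at or above
# factor * value, moving past the starting point when it is itself prime.
if __name__ == "__main__":
    assert is_prime(13) and not is_prime(21)
    print(next_prime(14))    # 17
    print(next_prime(5, 2))  # 11, the first prime after 2 * 5 = 10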
315
import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging __UpperCAmelCase : str = logging.get_logger(__name__) def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] ): """simple docstring""" UpperCamelCase : Union[str, Any] = nn.functional.normalize(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = nn.functional.normalize(SCREAMING_SNAKE_CASE_ ) return torch.mm(SCREAMING_SNAKE_CASE_ , normalized_text_embeds.t() ) class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : List[str] = CLIPConfig __UpperCamelCase : Optional[int] = ["CLIPEncoderLayer"] def __init__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" super().__init__(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = CLIPVisionModel(config.vision_config ) UpperCamelCase : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = nn.Parameter(torch.ones(17 ) , requires_grad=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = nn.Parameter(torch.ones(3 ) , requires_grad=__SCREAMING_SNAKE_CASE ) @torch.no_grad() def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Tuple = self.vision_model(__SCREAMING_SNAKE_CASE )[1] # pooled_output UpperCamelCase : Union[str, Any] = self.visual_projection(__SCREAMING_SNAKE_CASE ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCamelCase : Optional[int] = cosine_distance(__SCREAMING_SNAKE_CASE , self.special_care_embeds ).cpu().float().numpy() UpperCamelCase : List[Any] = cosine_distance(__SCREAMING_SNAKE_CASE , self.concept_embeds ).cpu().float().numpy() UpperCamelCase : Dict = [] UpperCamelCase : List[str] = image_embeds.shape[0] for i in range(__SCREAMING_SNAKE_CASE ): UpperCamelCase : Optional[Any] = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images UpperCamelCase : Optional[int] = 0.0 for concept_idx in range(len(special_cos_dist[0] ) ): UpperCamelCase : List[str] = special_cos_dist[i][concept_idx] UpperCamelCase : Optional[Any] = self.special_care_embeds_weights[concept_idx].item() UpperCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} ) UpperCamelCase : Optional[int] = 0.01 for concept_idx in range(len(cos_dist[0] ) ): UpperCamelCase : Optional[int] = cos_dist[i][concept_idx] UpperCamelCase : List[str] = self.concept_embeds_weights[concept_idx].item() UpperCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(__SCREAMING_SNAKE_CASE ) result.append(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = [len(res['''bad_concepts'''] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): 
"""simple docstring""" UpperCamelCase : Any = self.vision_model(__SCREAMING_SNAKE_CASE )[1] # pooled_output UpperCamelCase : int = self.visual_projection(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = cosine_distance(__SCREAMING_SNAKE_CASE , self.special_care_embeds ) UpperCamelCase : str = cosine_distance(__SCREAMING_SNAKE_CASE , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images UpperCamelCase : Union[str, Any] = 0.0 UpperCamelCase : Optional[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) UpperCamelCase : Optional[Any] = torch.any(special_scores > 0 , dim=1 ) UpperCamelCase : int = special_care * 0.01 UpperCamelCase : Tuple = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) UpperCamelCase : Optional[int] = (cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) UpperCamelCase : List[str] = torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
315
1
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the input ids for a conversation."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
315
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
315
1
import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCAmelCase : List[str] = logging.get_logger(__name__) __UpperCAmelCase : List[Any] = {"vocab_file": "sentencepiece.model"} __UpperCAmelCase : int = { "vocab_file": { "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model", }, } __UpperCAmelCase : int = { "google/rembert": 256, } class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES __UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[UNK]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[PAD]" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" super().__init__( do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) UpperCamelCase : Optional[int] = do_lower_case UpperCamelCase : List[str] = remove_space UpperCamelCase : int = keep_accents UpperCamelCase : int = vocab_file UpperCamelCase : List[str] = spm.SentencePieceProcessor() self.sp_model.Load(__SCREAMING_SNAKE_CASE ) @property def _lowercase ( self ): """simple docstring""" return len(self.sp_model ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" UpperCamelCase : List[str] = self.__dict__.copy() UpperCamelCase : List[Any] = None return state def __setstate__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[int] = d UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ): """simple docstring""" UpperCamelCase : Tuple = self.sp_model.EncodeAsPieces(__SCREAMING_SNAKE_CASE ) return pieces def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Dict = self.sp_model.decode_pieces(__SCREAMING_SNAKE_CASE ) return out_string def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" UpperCamelCase : List[str] = [self.sep_token_id] UpperCamelCase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False 
): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" UpperCamelCase : Optional[Any] = [self.sep_token_id] UpperCamelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error('''Vocabulary path ({}) should be a directory'''.format(__SCREAMING_SNAKE_CASE ) ) return UpperCamelCase : str = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
315
def upper(word: str) -> str:
    """Convert a string to ASCII uppercase by shifting lowercase letters down 32 code points."""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
315
1
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
315
import math


def is_prime(number: int) -> bool:
    """Check whether ``number`` is prime by trial division up to its square root."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
315
1
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """Class Vertex."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: "Vertex") -> list:
    """Prim's algorithm with a linear scan for the minimum-key vertex: O(V^2)."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: "Vertex") -> Iterator[tuple]:
    """Prim's algorithm with a binary heap: O((V + E) log V)."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
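# Minimal usage sketch (the graph layout below is an assumed example): five
# vertices with ids 0..4 are connected by weighted undirected edges using
# 1-based endpoints, and Prim's algorithm returns the MST as (child, parent)
# pairs of 1-based ids.
if __name__ == "__main__":
    demo = [Vertex(i) for i in range(5)]
    for u_id, v_id, w in [(1, 2, 3), (1, 3, 1), (2, 3, 7), (3, 4, 4), (4, 5, 2)]:
        connect(demo, u_id, v_id, w)
    print(prim(demo, demo[0]))             # [(2, 1), (3, 1), (4, 3), (5, 4)]
    print(list(prim_heap(demo, demo[0])))  # same tree via the heap variant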
315
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
315
1
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
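# Sanity-check sketch: 4150 is one of the sought numbers, since
# 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
if __name__ == "__main__":
    assert digits_fifth_powers_sum(4150) == 4150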
315
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=18 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , ): """simple docstring""" UpperCamelCase : List[str] = size if size is not None else {'''height''': 18, '''width''': 18} UpperCamelCase : int = parent UpperCamelCase : List[Any] = batch_size UpperCamelCase : Optional[int] = num_channels UpperCamelCase : Union[str, Any] = image_size UpperCamelCase : Union[str, Any] = min_resolution UpperCamelCase : Tuple = max_resolution UpperCamelCase : List[str] = do_resize UpperCamelCase : List[str] = size UpperCamelCase : int = apply_ocr def _lowercase ( self ): """simple docstring""" return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class UpperCAmelCase_ ( _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = LayoutLMvaImageProcessingTester(self ) @property def _lowercase ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''apply_ocr''' ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} ) UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) def _lowercase ( self ): """simple docstring""" pass def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input UpperCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) self.assertIsInstance(encoding.words , __SCREAMING_SNAKE_CASE ) self.assertIsInstance(encoding.boxes , __SCREAMING_SNAKE_CASE ) # Test batched UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values 
self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input UpperCamelCase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched UpperCamelCase : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input UpperCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched UpperCamelCase : Optional[int] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = LayoutLMvaImageProcessor() from datasets import load_dataset UpperCamelCase : Dict = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' ) UpperCamelCase : List[Any] = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 UpperCamelCase : Union[str, Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', 
'''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 UpperCamelCase : str = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 
391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , __SCREAMING_SNAKE_CASE ) self.assertListEqual(encoding.boxes , __SCREAMING_SNAKE_CASE ) # with apply_OCR = False UpperCamelCase : Optional[Any] = LayoutLMvaImageProcessor(apply_ocr=__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
315
1
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """Count the values t <= t_limit for which the number of hollow square
    laminae that can be built from exactly t tiles is between 1 and n_limit."""
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # The hole width must have the same parity as the outer width
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
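For a quick sanity check, a minimal sketch (the small limits are illustrative; it assumes the `solution` defined above is in scope):

# Spot-check with a much smaller tile budget than the default one million;
# this runs near-instantly and exercises the same counting logic.
print(solution(t_limit=1_000, n_limit=10))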
315
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    # Split the dataset bunch into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # The California house price dataset is used to demonstrate the regressor
    housing = fetch_california_housing()
    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
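A minimal sketch of reusing the helpers above on synthetic data (the random features and linear target are assumptions for illustration, not real housing data):

import numpy as np

rng = np.random.default_rng(0)
features = rng.random((100, 8))    # 8 features, mirroring the housing data layout
target = features @ rng.random(8)  # linear target so the regressor has signal to fit
predictions = xgboost(features[:80], target[:80], features[80:])
print(predictions.shape)  # (20, 1)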
315
1
import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __UpperCAmelCase : Optional[Any] = sys.version_info >= (3, 10) def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None ): """simple docstring""" return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE_ ) @dataclass class UpperCAmelCase_ : '''simple docstring''' __UpperCamelCase : int __UpperCamelCase : float __UpperCamelCase : str __UpperCamelCase : bool @dataclass class UpperCAmelCase_ : '''simple docstring''' __UpperCamelCase : int = 42 __UpperCamelCase : str = field(default="toto", metadata={"help": "help message"}) @dataclass class UpperCAmelCase_ : '''simple docstring''' __UpperCamelCase : bool = False __UpperCamelCase : bool = True __UpperCamelCase : Optional[bool] = None class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : Union[str, Any] = "titi" __UpperCamelCase : Optional[Any] = "toto" class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : Optional[int] = "titi" __UpperCamelCase : Union[str, Any] = "toto" __UpperCamelCase : Optional[Any] = 42 @dataclass class UpperCAmelCase_ : '''simple docstring''' __UpperCamelCase : BasicEnum = "toto" def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = BasicEnum(self.foo ) @dataclass class UpperCAmelCase_ : '''simple docstring''' __UpperCamelCase : MixedTypeEnum = "toto" def _lowercase ( self ): """simple docstring""" UpperCamelCase : Tuple = MixedTypeEnum(self.foo ) @dataclass class UpperCAmelCase_ : '''simple docstring''' __UpperCamelCase : Optional[int] = None __UpperCamelCase : Optional[float] = field(default=_a, metadata={"help": "help message"}) __UpperCamelCase : Optional[str] = None __UpperCamelCase : Optional[List[str]] = list_field(default=[]) __UpperCamelCase : Optional[List[int]] = list_field(default=[]) @dataclass class UpperCAmelCase_ : '''simple docstring''' __UpperCamelCase : List[int] = list_field(default=[]) __UpperCamelCase : List[int] = list_field(default=[1, 2, 3]) __UpperCamelCase : List[str] = list_field(default=["Hallo", "Bonjour", "Hello"]) __UpperCamelCase : List[float] = list_field(default=[0.1, 0.2, 0.3]) @dataclass class UpperCAmelCase_ : '''simple docstring''' __UpperCamelCase : List[int] = field() __UpperCamelCase : str = field() __UpperCamelCase : BasicEnum = field() def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = BasicEnum(self.required_enum ) @dataclass class UpperCAmelCase_ : '''simple docstring''' __UpperCamelCase : int __UpperCamelCase : "BasicEnum" = field() __UpperCamelCase : "Optional[bool]" = None __UpperCamelCase : "str" = field(default="toto", metadata={"help": "help message"}) __UpperCamelCase : "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"]) if is_python_no_less_than_3_10: @dataclass class UpperCAmelCase_ : '''simple docstring''' __UpperCamelCase : bool = False __UpperCamelCase : bool = True __UpperCamelCase : bool | None = None @dataclass class UpperCAmelCase_ : '''simple docstring''' __UpperCamelCase : int | None = None 
__UpperCamelCase : float | None = field(default=_a, metadata={"help": "help message"}) __UpperCamelCase : str | None = None __UpperCamelCase : list[str] | None = list_field(default=[]) __UpperCamelCase : list[int] | None = list_field(default=[]) class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): UpperCamelCase : List[Any] = {k: v for k, v in vars(__SCREAMING_SNAKE_CASE ).items() if k != '''container'''} UpperCamelCase : Optional[int] = {k: v for k, v in vars(__SCREAMING_SNAKE_CASE ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , __SCREAMING_SNAKE_CASE ) and yy.get('''choices''' , __SCREAMING_SNAKE_CASE ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](__SCREAMING_SNAKE_CASE ) , yy['''type'''](__SCREAMING_SNAKE_CASE ) ) del xx["type"], yy["type"] self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : int = HfArgumentParser(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--bar''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--baz''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--flag''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , const=__SCREAMING_SNAKE_CASE , nargs='''?''' ) self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : str = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((UpperCamelCase) , ) : Optional[int] = parser.parse_args_into_dataclasses(__SCREAMING_SNAKE_CASE , look_for_args_file=__SCREAMING_SNAKE_CASE ) self.assertFalse(example.flag ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = HfArgumentParser(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=42 , type=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--baz''' , default='''toto''' , type=__SCREAMING_SNAKE_CASE , help='''help message''' ) self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , const=__SCREAMING_SNAKE_CASE , nargs='''?''' ) expected.add_argument('''--baz''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , const=__SCREAMING_SNAKE_CASE , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=__SCREAMING_SNAKE_CASE , dest='''baz''' ) expected.add_argument('''--opt''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(__SCREAMING_SNAKE_CASE ) for dataclass_type in dataclass_types: UpperCamelCase : List[str] = 
HfArgumentParser(__SCREAMING_SNAKE_CASE ) self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = parser.parse_args([] ) self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) ) UpperCamelCase : Any = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) ) UpperCamelCase : Optional[Any] = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) ) UpperCamelCase : Union[str, Any] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) ) UpperCamelCase : Optional[Any] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = HfArgumentParser(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) UpperCamelCase : str = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) UpperCamelCase : Any = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) UpperCamelCase : Any = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) UpperCamelCase : Any = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) UpperCamelCase : List[str] = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def _lowercase ( self ): """simple docstring""" @dataclass class UpperCAmelCase_ : '''simple docstring''' __UpperCamelCase : Literal["titi", "toto", 42] = "toto" UpperCamelCase : Tuple = HfArgumentParser(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : str = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) UpperCamelCase : Optional[Any] = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) UpperCamelCase : Dict = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = HfArgumentParser(__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--bar_int''' , 
nargs='''+''' , default=[1, 2, 3] , type=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=__SCREAMING_SNAKE_CASE ) self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = parser.parse_args([] ) self.assertEqual( __SCREAMING_SNAKE_CASE , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) UpperCamelCase : Dict = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--bar''' , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help='''help message''' ) expected.add_argument('''--baz''' , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(__SCREAMING_SNAKE_CASE ) for dataclass_type in dataclass_types: UpperCamelCase : List[str] = HfArgumentParser(__SCREAMING_SNAKE_CASE ) self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = parser.parse_args([] ) self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , bar=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , ces=[] , des=[] ) ) UpperCamelCase : List[str] = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = HfArgumentParser(__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--required_str''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__SCREAMING_SNAKE_CASE , ) self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Tuple = HfArgumentParser(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__SCREAMING_SNAKE_CASE , ) expected.add_argument('''--opt''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE ) expected.add_argument('''--baz''' , default='''toto''' , type=__SCREAMING_SNAKE_CASE , help='''help 
message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__SCREAMING_SNAKE_CASE ) self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = HfArgumentParser(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } UpperCamelCase : Dict = parser.parse_dict(__SCREAMING_SNAKE_CASE )[0] UpperCamelCase : str = BasicExample(**__SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = HfArgumentParser(__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, '''extra''': 42, } self.assertRaises(__SCREAMING_SNAKE_CASE , parser.parse_dict , __SCREAMING_SNAKE_CASE , allow_extra_keys=__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = HfArgumentParser(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase : Any = os.path.join(__SCREAMING_SNAKE_CASE , '''temp_json''' ) os.mkdir(__SCREAMING_SNAKE_CASE ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] UpperCamelCase : Union[str, Any] = BasicExample(**__SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = HfArgumentParser(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase : Optional[int] = os.path.join(__SCREAMING_SNAKE_CASE , '''temp_yaml''' ) os.mkdir(__SCREAMING_SNAKE_CASE ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] UpperCamelCase : Any = BasicExample(**__SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = HfArgumentParser(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
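As a minimal sketch of the pattern these tests exercise (the `Args` dataclass is illustrative, not taken from the test file): define a dataclass, hand it to `HfArgumentParser`, and parse CLI-style tokens into a typed instance.

from dataclasses import dataclass

from transformers import HfArgumentParser


@dataclass
class Args:
    foo: int = 1
    baz: str = "toto"


parser = HfArgumentParser(Args)
(args,) = parser.parse_args_into_dataclasses(["--foo", "2"], look_for_args_file=False)
print(args.foo, args.baz)  # 2 toto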
315
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
315
1
from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
315
import collections import os import re from pathlib import Path __UpperCAmelCase : List[str] = "src/transformers" # Matches is_xxx_available() __UpperCAmelCase : int = re.compile(r"is\_([a-z_]*)_available()") # Catches a one-line _import_struct = {xxx} __UpperCAmelCase : Optional[int] = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __UpperCAmelCase : List[Any] = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]") # Catches a line if not is_foo_available __UpperCAmelCase : List[Any] = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)") # Catches a line _import_struct["bla"].append("foo") __UpperCAmelCase : str = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __UpperCAmelCase : Union[str, Any] = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]") # Catches a line with an object between quotes and a comma: "MyModel", __UpperCAmelCase : Dict = re.compile(r"^\s+\"([^\"]+)\",") # Catches a line with objects between brackets only: ["foo", "bar"], __UpperCAmelCase : str = re.compile(r"^\s+\[([^\]]+)\]") # Catches a line with from foo import bar, bla, boo __UpperCAmelCase : str = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Catches a line with try: __UpperCAmelCase : Any = re.compile(r"^\s*try:") # Catches a line with else: __UpperCAmelCase : List[Any] = re.compile(r"^\s*else:") def a ( SCREAMING_SNAKE_CASE_ : Dict ): """simple docstring""" if _re_test_backend.search(SCREAMING_SNAKE_CASE_ ) is None: return None UpperCamelCase : Union[str, Any] = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE_ )] backends.sort() return "_and_".join(SCREAMING_SNAKE_CASE_ ) def a ( SCREAMING_SNAKE_CASE_ : Tuple ): """simple docstring""" with open(SCREAMING_SNAKE_CASE_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: UpperCamelCase : Tuple = f.readlines() UpperCamelCase : Tuple = 0 while line_index < len(SCREAMING_SNAKE_CASE_ ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(SCREAMING_SNAKE_CASE_ ): return None # First grab the objects without a specific backend in _import_structure UpperCamelCase : List[Any] = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: UpperCamelCase : Optional[int] = lines[line_index] # If we have everything on a single line, let's deal with it. 
if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Optional[int] = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ).groups()[0] UpperCamelCase : str = re.findall(R'''\[([^\]]+)\]''' , SCREAMING_SNAKE_CASE_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue UpperCamelCase : List[Any] = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE_ ) if single_line_import_search is not None: UpperCamelCase : List[str] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(SCREAMING_SNAKE_CASE_ ) > 0] objects.extend(SCREAMING_SNAKE_CASE_ ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 UpperCamelCase : Dict = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. UpperCamelCase : Dict = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: UpperCamelCase : Optional[Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 UpperCamelCase : str = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): UpperCamelCase : str = lines[line_index] if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ) is not None: objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ).groups()[0] ) elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ) is not None: UpperCamelCase : Union[str, Any] = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(''', ''' ) UpperCamelCase : List[Any] = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0] objects.extend(SCREAMING_SNAKE_CASE_ ) elif _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ) is not None: UpperCamelCase : str = _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(''', ''' ) UpperCamelCase : Dict = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0] objects.extend(SCREAMING_SNAKE_CASE_ ) elif _re_quote_object.search(SCREAMING_SNAKE_CASE_ ) is not None: objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE_ ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 1_2 + '''"''' ): objects.append(line[1_3:-3] ) line_index += 1 UpperCamelCase : Tuple = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend UpperCamelCase : int = [] while ( line_index < len(SCREAMING_SNAKE_CASE_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): UpperCamelCase : Tuple = lines[line_index] UpperCamelCase : Any = _re_import.search(SCREAMING_SNAKE_CASE_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 UpperCamelCase : Any = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(SCREAMING_SNAKE_CASE_ ): # If the line is an if is_backend_available, we grab all objects associated. 
UpperCamelCase : Optional[Any] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: UpperCamelCase : Dict = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 UpperCamelCase : Dict = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): UpperCamelCase : Optional[Any] = lines[line_index] UpperCamelCase : str = _re_import.search(SCREAMING_SNAKE_CASE_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 1_2 ): objects.append(line[1_2:-2] ) line_index += 1 UpperCamelCase : str = objects else: line_index += 1 return import_dict_objects, type_hint_objects def a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ): """simple docstring""" def find_duplicates(SCREAMING_SNAKE_CASE_ : Any ): return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] UpperCamelCase : Dict = [] for key in import_dict_objects.keys(): UpperCamelCase : Union[str, Any] = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) UpperCamelCase : Dict = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): UpperCamelCase : List[str] = '''base imports''' if key == '''none''' else F"""{key} backend""" errors.append(F"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def a ( ): """simple docstring""" UpperCamelCase : Any = [] for root, _, files in os.walk(SCREAMING_SNAKE_CASE_ ): if "__init__.py" in files: UpperCamelCase : int = os.path.join(SCREAMING_SNAKE_CASE_ , '''__init__.py''' ) UpperCamelCase : Optional[int] = parse_init(SCREAMING_SNAKE_CASE_ ) if objects is not None: UpperCamelCase : str = analyze_results(*SCREAMING_SNAKE_CASE_ ) if len(SCREAMING_SNAKE_CASE_ ) > 0: UpperCamelCase : List[Any] = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append('''\n'''.join(SCREAMING_SNAKE_CASE_ ) ) if len(SCREAMING_SNAKE_CASE_ ) > 0: raise ValueError('''\n\n'''.join(SCREAMING_SNAKE_CASE_ ) ) def a ( ): """simple docstring""" UpperCamelCase : Dict = [] for path, directories, files in os.walk(SCREAMING_SNAKE_CASE_ ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(SCREAMING_SNAKE_CASE_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(SCREAMING_SNAKE_CASE_ ) / folder).glob('''*.py''' ) ) ) == 0: continue UpperCamelCase : List[str] = str((Path(SCREAMING_SNAKE_CASE_ ) / folder).relative_to(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase : str = short_path.replace(os.path.sep , 
'''.''' ) submodules.append(SCREAMING_SNAKE_CASE_ ) for fname in files: if fname == "__init__.py": continue UpperCamelCase : Tuple = str((Path(SCREAMING_SNAKE_CASE_ ) / fname).relative_to(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase : int = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(SCREAMING_SNAKE_CASE_ ) return submodules __UpperCAmelCase : Optional[int] = [ "convert_pytorch_checkpoint_to_tf2", "modeling_flax_pytorch_utils", "models.esm.openfold_utils", ] def a ( ): """simple docstring""" from transformers.utils import direct_transformers_import UpperCamelCase : Tuple = direct_transformers_import(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(SCREAMING_SNAKE_CASE_ , '''__init__.py''' ) , '''r''' ) as f: UpperCamelCase : List[Any] = f.read() import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , SCREAMING_SNAKE_CASE_ ) ) ) UpperCamelCase : Union[str, Any] = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(SCREAMING_SNAKE_CASE_ ) > 0: UpperCamelCase : str = '''\n'''.join(F"""- {module}""" for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registed in the main init of Transformers:\n''' F"""{list_of_modules}\n""" '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
315
1
import copy
import re


class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())

        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)

        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]

            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
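A minimal usage sketch for the namer (the `RunNamer` subclass and its defaults are illustrative):

class RunNamer(TrialShortNamer):
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 1e-3, "batch_size": 8}


# Only parameters that differ from DEFAULTS appear in the short name.
name = RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 8})
print(name)  # e.g. "run_lr0.0001"
params = RunNamer.parse_repr(name)  # round-trips back to a full param dict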
315
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """A pangram contains every letter of the alphabet at least once."""
    # Declare frequency as a set to collect unique occurrences of letters
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark code comparing the different versions."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
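A quick usage sketch for the three checkers above:

print(is_pangram("The quick brown fox jumps over the lazy dog"))       # True
print(is_pangram_faster("Hello world"))                                # False
print(is_pangram_fastest("Pack my box with five dozen liquor jugs"))   # True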
315
1
import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=1 , ): """simple docstring""" UpperCamelCase : List[Any] = parent UpperCamelCase : List[str] = batch_size UpperCamelCase : Optional[int] = seq_length UpperCamelCase : Any = is_training UpperCamelCase : List[Any] = use_input_mask UpperCamelCase : Tuple = use_token_type_ids UpperCamelCase : Optional[Any] = use_labels UpperCamelCase : Tuple = vocab_size UpperCamelCase : str = hidden_size UpperCamelCase : List[Any] = num_hidden_layers UpperCamelCase : Optional[Any] = num_attention_heads UpperCamelCase : Tuple = intermediate_size UpperCamelCase : Any = hidden_act UpperCamelCase : str = hidden_dropout_prob UpperCamelCase : Optional[Any] = attention_probs_dropout_prob UpperCamelCase : int = max_position_embeddings UpperCamelCase : List[Any] = type_vocab_size UpperCamelCase : Optional[Any] = type_sequence_label_size UpperCamelCase : Union[str, Any] = initializer_range UpperCamelCase : List[Any] = num_labels UpperCamelCase : str = num_choices UpperCamelCase : List[Any] = scope UpperCamelCase : Tuple = q_groups UpperCamelCase : List[Any] = k_groups UpperCamelCase : Any = v_groups UpperCamelCase : str = post_attention_groups UpperCamelCase : Dict = intermediate_groups UpperCamelCase : List[str] = output_groups def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase : List[str] = None if self.use_input_mask: UpperCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase : List[str] = None UpperCamelCase : Dict = None UpperCamelCase : Dict = None if self.use_labels: UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase : List[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, 
choice_labels def _lowercase ( self ): """simple docstring""" return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Tuple = SqueezeBertModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : Optional[int] = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : int = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Any = SqueezeBertForMaskedLM(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : str = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : int = SqueezeBertForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : str = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : int = self.num_labels UpperCamelCase : Tuple = SqueezeBertForSequenceClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : List[str] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[Any] = self.num_labels UpperCamelCase : Tuple = SqueezeBertForTokenClassification(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase 
: Union[str, Any] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[int] = self.num_choices UpperCamelCase : Dict = SqueezeBertForMultipleChoice(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase : Dict = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = self.prepare_config_and_inputs() ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Dict = config_and_inputs UpperCamelCase : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a, _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : Union[str, Any] = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) __UpperCamelCase : List[str] = ( { "feature-extraction": SqueezeBertModel, "fill-mask": SqueezeBertForMaskedLM, "question-answering": SqueezeBertForQuestionAnswering, "text-classification": SqueezeBertForSequenceClassification, "token-classification": SqueezeBertForTokenClassification, "zero-shot": SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase : List[str] = False __UpperCamelCase : int = True __UpperCamelCase : Any = False def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = SqueezeBertModelTester(self ) UpperCamelCase : Dict = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , dim=37 ) def _lowercase ( self ): """simple docstring""" self.config_tester.run_common_tests() def _lowercase ( self ): """simple docstring""" UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*__SCREAMING_SNAKE_CASE ) 
def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*__SCREAMING_SNAKE_CASE ) @slow def _lowercase ( self ): """simple docstring""" for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : int = SqueezeBertModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @require_sentencepiece @require_tokenizers @require_torch class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' @slow def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' ) UpperCamelCase : List[Any] = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]] ) UpperCamelCase : Union[str, Any] = model(__SCREAMING_SNAKE_CASE )[0] UpperCamelCase : Tuple = torch.Size((1, 3) ) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = torch.tensor([[0.6_401, -0.0_349, -0.6_041]] ) self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
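As a minimal sketch of what the integration check above verifies (the tokenizer usage is an assumption; the test feeds raw token ids directly):

from transformers import AutoTokenizer, SqueezeBertForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("squeezebert/squeezebert-mnli")
model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
inputs = tokenizer(
    "A soccer game with multiple males playing.",
    "Some men are playing a sport.",
    return_tensors="pt",
)
logits = model(**inputs).logits  # shape (1, 3); label ordering depends on the checkpoint config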
315
import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) __UpperCAmelCase : Union[str, Any] = logging.getLogger() def a ( ): """simple docstring""" UpperCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument('''-f''' ) UpperCamelCase : List[str] = parser.parse_args() return args.f class UpperCAmelCase_ ( _a): '''simple docstring''' def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = logging.StreamHandler(sys.stdout ) logger.addHandler(__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Dict = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , '''run_glue_deebert.py''' ) with patch.object(__SCREAMING_SNAKE_CASE , '''argv''' , __SCREAMING_SNAKE_CASE ): UpperCamelCase : int = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(__SCREAMING_SNAKE_CASE , 0.666 ) @slow @require_torch_non_multi_gpu def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = ''' --model_type roberta --model_name_or_path roberta-base --task_name MRPC --do_train --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --max_seq_length 128 --per_gpu_eval_batch_size=1 --per_gpu_train_batch_size=8 --learning_rate 2e-4 --num_train_epochs 3 --overwrite_output_dir --seed 42 --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --save_steps 0 --overwrite_cache --eval_after_first_stage '''.split() self.run_and_check(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = ''' --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --eval_each_highway --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 '''.split() self.run_and_check(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = ''' --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --early_exit_entropy 0.1 --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 '''.split() self.run_and_check(__SCREAMING_SNAKE_CASE )
315
1
import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=4 , ): """simple docstring""" UpperCamelCase : Optional[Any] = parent UpperCamelCase : Optional[int] = batch_size UpperCamelCase : Optional[Any] = seq_length UpperCamelCase : Tuple = is_training UpperCamelCase : Optional[Any] = use_attention_mask UpperCamelCase : str = use_token_type_ids UpperCamelCase : Dict = use_labels UpperCamelCase : Optional[Any] = vocab_size UpperCamelCase : int = hidden_size UpperCamelCase : Optional[Any] = num_hidden_layers UpperCamelCase : Optional[int] = num_attention_heads UpperCamelCase : Dict = intermediate_size UpperCamelCase : Optional[Any] = hidden_act UpperCamelCase : int = hidden_dropout_prob UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob UpperCamelCase : Any = max_position_embeddings UpperCamelCase : Optional[int] = type_vocab_size UpperCamelCase : List[Any] = type_sequence_label_size UpperCamelCase : Optional[Any] = initializer_range UpperCamelCase : Tuple = num_choices def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase : Optional[Any] = None if self.use_attention_mask: UpperCamelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase : Dict = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__SCREAMING_SNAKE_CASE , ) return config, input_ids, attention_mask def _lowercase ( self ): """simple docstring""" UpperCamelCase : Tuple = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = config_and_inputs UpperCamelCase : Tuple = {'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class UpperCAmelCase_ ( _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : str = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, 
        FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class UpperCAmelCase_(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
315
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase : Tuple = logging.get_logger(__name__) __UpperCAmelCase : Union[str, Any] = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : List[Any] = "ibert" def __init__( self , __SCREAMING_SNAKE_CASE=30_522 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE="absolute" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="none" , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = vocab_size UpperCamelCase : Optional[int] = hidden_size UpperCamelCase : Tuple = num_hidden_layers UpperCamelCase : Optional[Any] = num_attention_heads UpperCamelCase : Dict = hidden_act UpperCamelCase : Union[str, Any] = intermediate_size UpperCamelCase : str = hidden_dropout_prob UpperCamelCase : Any = attention_probs_dropout_prob UpperCamelCase : Dict = max_position_embeddings UpperCamelCase : Union[str, Any] = type_vocab_size UpperCamelCase : Optional[Any] = initializer_range UpperCamelCase : Union[str, Any] = layer_norm_eps UpperCamelCase : Dict = position_embedding_type UpperCamelCase : int = quant_mode UpperCamelCase : Any = force_dequant class UpperCAmelCase_ ( _a): '''simple docstring''' @property def _lowercase ( self ): """simple docstring""" if self.task == "multiple-choice": UpperCamelCase : int = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: UpperCamelCase : Optional[int] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
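A short usage sketch for the configuration class above, assuming it is exposed as transformers.IBertConfig (as the "ibert" model_type suggests):

from transformers import IBertConfig

# Default architecture; quant_mode=True switches I-BERT to integer-only mode.
config = IBertConfig(quant_mode=True)
print(config.hidden_size, config.num_hidden_layers, config.quant_mode)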
315
1
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) __UpperCAmelCase : Optional[int] = logging.get_logger(__name__) __UpperCAmelCase : Any = OrderedDict( [ ("align", "EfficientNetImageProcessor"), ("beit", "BeitImageProcessor"), ("bit", "BitImageProcessor"), ("blip", "BlipImageProcessor"), ("blip-2", "BlipImageProcessor"), ("bridgetower", "BridgeTowerImageProcessor"), ("chinese_clip", "ChineseCLIPImageProcessor"), ("clip", "CLIPImageProcessor"), ("clipseg", "ViTImageProcessor"), ("conditional_detr", "ConditionalDetrImageProcessor"), ("convnext", "ConvNextImageProcessor"), ("convnextv2", "ConvNextImageProcessor"), ("cvt", "ConvNextImageProcessor"), ("data2vec-vision", "BeitImageProcessor"), ("deformable_detr", "DeformableDetrImageProcessor"), ("deit", "DeiTImageProcessor"), ("deta", "DetaImageProcessor"), ("detr", "DetrImageProcessor"), ("dinat", "ViTImageProcessor"), ("donut-swin", "DonutImageProcessor"), ("dpt", "DPTImageProcessor"), ("efficientformer", "EfficientFormerImageProcessor"), ("efficientnet", "EfficientNetImageProcessor"), ("flava", "FlavaImageProcessor"), ("focalnet", "BitImageProcessor"), ("git", "CLIPImageProcessor"), ("glpn", "GLPNImageProcessor"), ("groupvit", "CLIPImageProcessor"), ("imagegpt", "ImageGPTImageProcessor"), ("instructblip", "BlipImageProcessor"), ("layoutlmv2", "LayoutLMv2ImageProcessor"), ("layoutlmv3", "LayoutLMv3ImageProcessor"), ("levit", "LevitImageProcessor"), ("mask2former", "Mask2FormerImageProcessor"), ("maskformer", "MaskFormerImageProcessor"), ("mgp-str", "ViTImageProcessor"), ("mobilenet_v1", "MobileNetV1ImageProcessor"), ("mobilenet_v2", "MobileNetV2ImageProcessor"), ("mobilevit", "MobileViTImageProcessor"), ("mobilevit", "MobileViTImageProcessor"), ("mobilevitv2", "MobileViTImageProcessor"), ("nat", "ViTImageProcessor"), ("oneformer", "OneFormerImageProcessor"), ("owlvit", "OwlViTImageProcessor"), ("perceiver", "PerceiverImageProcessor"), ("pix2struct", "Pix2StructImageProcessor"), ("poolformer", "PoolFormerImageProcessor"), ("regnet", "ConvNextImageProcessor"), ("resnet", "ConvNextImageProcessor"), ("sam", "SamImageProcessor"), ("segformer", "SegformerImageProcessor"), ("swiftformer", "ViTImageProcessor"), ("swin", "ViTImageProcessor"), ("swin2sr", "Swin2SRImageProcessor"), ("swinv2", "ViTImageProcessor"), ("table-transformer", "DetrImageProcessor"), ("timesformer", "VideoMAEImageProcessor"), ("tvlt", "TvltImageProcessor"), ("upernet", "SegformerImageProcessor"), ("van", "ConvNextImageProcessor"), ("videomae", "VideoMAEImageProcessor"), ("vilt", "ViltImageProcessor"), ("vit", "ViTImageProcessor"), ("vit_hybrid", "ViTHybridImageProcessor"), ("vit_mae", "ViTImageProcessor"), ("vit_msn", "ViTImageProcessor"), ("xclip", "CLIPImageProcessor"), ("yolos", "YolosImageProcessor"), ] ) __UpperCAmelCase : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def a ( SCREAMING_SNAKE_CASE_ : str ): """simple docstring""" for module_name, extractors in 
IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in extractors: UpperCamelCase : Tuple = model_type_to_module_name(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = importlib.import_module(F""".{module_name}""" , '''transformers.models''' ) try: return getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(SCREAMING_SNAKE_CASE_ , '''__name__''' , SCREAMING_SNAKE_CASE_ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. UpperCamelCase : List[Any] = importlib.import_module('''transformers''' ) if hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return None def a ( SCREAMING_SNAKE_CASE_ : Union[str, os.PathLike] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, os.PathLike]] = None , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, str]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[bool, str]] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : bool = False , **SCREAMING_SNAKE_CASE_ : Optional[int] , ): """simple docstring""" UpperCamelCase : int = get_file_from_repo( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , force_download=SCREAMING_SNAKE_CASE_ , resume_download=SCREAMING_SNAKE_CASE_ , proxies=SCREAMING_SNAKE_CASE_ , use_auth_token=SCREAMING_SNAKE_CASE_ , revision=SCREAMING_SNAKE_CASE_ , local_files_only=SCREAMING_SNAKE_CASE_ , ) if resolved_config_file is None: logger.info( '''Could not locate the image processor configuration file, will try to use the model config instead.''' ) return {} with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as reader: return json.load(SCREAMING_SNAKE_CASE_ ) class UpperCAmelCase_ : '''simple docstring''' def __init__( self ): """simple docstring""" raise EnvironmentError( '''AutoImageProcessor is designed to be instantiated ''' '''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' ) @classmethod @replace_list_option_in_docstrings(__SCREAMING_SNAKE_CASE ) def _lowercase ( cls , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[int] = kwargs.pop('''config''' , __SCREAMING_SNAKE_CASE ) UpperCamelCase : str = kwargs.pop('''trust_remote_code''' , __SCREAMING_SNAKE_CASE ) UpperCamelCase : int = True UpperCamelCase , UpperCamelCase : Any = ImageProcessingMixin.get_image_processor_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = config_dict.get('''image_processor_type''' , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = None if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ): UpperCamelCase : Optional[Any] = config_dict['''auto_map''']['''AutoImageProcessor'''] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: UpperCamelCase : Any = config_dict.pop('''feature_extractor_type''' , __SCREAMING_SNAKE_CASE ) if feature_extractor_class is not None: logger.warning( '''Could not find image processor class in the image processor config or the model config. 
Loading''' ''' based on pattern matching with the model\'s feature extractor configuration.''' ) UpperCamelCase : Tuple = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' ) if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ): UpperCamelCase : List[Any] = config_dict['''auto_map''']['''AutoFeatureExtractor'''] UpperCamelCase : Tuple = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' ) logger.warning( '''Could not find image processor auto map in the image processor config or the model config.''' ''' Loading based on pattern matching with the model\'s feature extractor configuration.''' ) # If we don't find the image processor class in the image processor config, let's try the model config. if image_processor_class is None and image_processor_auto_map is None: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): UpperCamelCase : List[str] = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) # It could be in `config.image_processor_type`` UpperCamelCase : int = getattr(__SCREAMING_SNAKE_CASE , '''image_processor_type''' , __SCREAMING_SNAKE_CASE ) if hasattr(__SCREAMING_SNAKE_CASE , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map: UpperCamelCase : List[Any] = config.auto_map['''AutoImageProcessor'''] if image_processor_class is not None: UpperCamelCase : List[str] = image_processor_class_from_name(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = image_processor_auto_map is not None UpperCamelCase : Dict = image_processor_class is not None or type(__SCREAMING_SNAKE_CASE ) in IMAGE_PROCESSOR_MAPPING UpperCamelCase : Union[str, Any] = resolve_trust_remote_code( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if has_remote_code and trust_remote_code: UpperCamelCase : Dict = get_class_from_dynamic_module( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = kwargs.pop('''code_revision''' , __SCREAMING_SNAKE_CASE ) if os.path.isdir(__SCREAMING_SNAKE_CASE ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) elif image_processor_class is not None: return image_processor_class.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(__SCREAMING_SNAKE_CASE ) in IMAGE_PROCESSOR_MAPPING: UpperCamelCase : Dict = IMAGE_PROCESSOR_MAPPING[type(__SCREAMING_SNAKE_CASE )] return image_processor_class.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) raise ValueError( f"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """ f"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """ f"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" ) @staticmethod def _lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" IMAGE_PROCESSOR_MAPPING.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
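A hedged usage sketch of the resolution logic above; it assumes network access to the Hugging Face Hub, and that google/vit-base-patch16-224 maps to ViTImageProcessor per the mapping table above:

from transformers import AutoImageProcessor

# Resolution order sketched above: explicit class in the processor config,
# then trusted remote code, then the model-type mapping.
image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
print(type(image_processor).__name__)  # expected: ViTImageProcessor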
315
import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup __UpperCAmelCase : int = logging.get_logger(__name__) class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , **__SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(self , ['''bs4'''] ) super().__init__(**__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : List[Any] = [] UpperCamelCase : int = [] UpperCamelCase : List[Any] = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag UpperCamelCase : Tuple = parent.find_all(child.name , recursive=__SCREAMING_SNAKE_CASE ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(__SCREAMING_SNAKE_CASE ) else next(i for i, s in enumerate(__SCREAMING_SNAKE_CASE , 1 ) if s is child ) ) UpperCamelCase : Optional[Any] = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[Any] = BeautifulSoup(__SCREAMING_SNAKE_CASE , '''html.parser''' ) UpperCamelCase : Union[str, Any] = [] UpperCamelCase : List[str] = [] UpperCamelCase : str = [] for element in html_code.descendants: if type(__SCREAMING_SNAKE_CASE ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue UpperCamelCase : Any = html.unescape(__SCREAMING_SNAKE_CASE ).strip() if not text_in_this_tag: continue all_doc_strings.append(__SCREAMING_SNAKE_CASE ) UpperCamelCase , UpperCamelCase : int = self.xpath_soup(__SCREAMING_SNAKE_CASE ) stringaxtag_seq.append(__SCREAMING_SNAKE_CASE ) stringaxsubs_seq.append(__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[Any] = '''''' for tagname, subs in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): xpath += f"""/{tagname}""" if subs != 0: xpath += f"""[{subs}]""" return xpath def __call__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : int = False # Check that strings has a valid type if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): UpperCamelCase : Optional[Any] = True elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ): if len(__SCREAMING_SNAKE_CASE ) == 0 or isinstance(html_strings[0] , __SCREAMING_SNAKE_CASE ): UpperCamelCase : List[str] = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' f"""but is of type {type(__SCREAMING_SNAKE_CASE )}.""" ) UpperCamelCase : int = bool(isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(html_strings[0] , __SCREAMING_SNAKE_CASE )) ) if not is_batched: UpperCamelCase : Union[str, Any] = [html_strings] # Get nodes + xpaths UpperCamelCase : str = [] UpperCamelCase : int = [] for html_string in html_strings: UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = self.get_three_from_single(__SCREAMING_SNAKE_CASE ) 
nodes.append(__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = [] for node, tag_list, sub_list in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): UpperCamelCase : str = self.construct_xpath(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) xpath_strings.append(__SCREAMING_SNAKE_CASE ) xpaths.append(__SCREAMING_SNAKE_CASE ) # return as Dict UpperCamelCase : List[str] = {'''nodes''': nodes, '''xpaths''': xpaths} UpperCamelCase : List[Any] = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE ) return encoded_inputs
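The class above mirrors the MarkupLM feature extractor. As a sketch — assuming it is exposed as transformers.MarkupLMFeatureExtractor and that bs4 is installed — extracting nodes and xpaths from a small HTML string might look like:

from transformers import MarkupLMFeatureExtractor

html = "<html><body><h1>Title</h1><p>First paragraph.</p></body></html>"

feature_extractor = MarkupLMFeatureExtractor()
encoding = feature_extractor(html)
print(encoding["nodes"])   # expected, roughly: [['Title', 'First paragraph.']]
print(encoding["xpaths"])  # expected, roughly: [['/html/body/h1', '/html/body/p']]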
315
1
import datasets from .evaluate import evaluate __UpperCAmelCase : Optional[Any] = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n" __UpperCAmelCase : Dict = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n" __UpperCAmelCase : int = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class UpperCAmelCase_ ( datasets.Metric): '''simple docstring''' def _lowercase ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': {'''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.Value('''string''' )}, '''references''': { '''id''': datasets.Value('''string''' ), '''answers''': datasets.features.Sequence( { '''text''': datasets.Value('''string''' ), '''answer_start''': datasets.Value('''int32''' ), } ), }, } ) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : int = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions} UpperCamelCase : int = [ { '''paragraphs''': [ { '''qas''': [ { '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']], '''id''': ref['''id'''], } for ref in references ] } ] } ] UpperCamelCase : List[str] = evaluate(dataset=__SCREAMING_SNAKE_CASE , predictions=__SCREAMING_SNAKE_CASE ) return score
315
import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params __UpperCAmelCase : List[str] = getLogger(__name__) __UpperCAmelCase : Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu" def a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int = 8 , SCREAMING_SNAKE_CASE_ : str = DEFAULT_DEVICE , SCREAMING_SNAKE_CASE_ : List[Any]=False , SCREAMING_SNAKE_CASE_ : int="summarization" , SCREAMING_SNAKE_CASE_ : int=None , **SCREAMING_SNAKE_CASE_ : Any , ): """simple docstring""" UpperCamelCase : Dict = Path(SCREAMING_SNAKE_CASE_ ).open('''w''' , encoding='''utf-8''' ) UpperCamelCase : List[Any] = str(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) if fpaa: UpperCamelCase : List[Any] = model.half() UpperCamelCase : str = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type. UpperCamelCase : int = time.time() # update config with task specific params use_task_specific_params(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if prefix is None: UpperCamelCase : Union[str, Any] = prefix or getattr(model.config , '''prefix''' , '''''' ) or '''''' for examples_chunk in tqdm(list(chunks(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ): UpperCamelCase : Optional[int] = [prefix + text for text in examples_chunk] UpperCamelCase : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , truncation=SCREAMING_SNAKE_CASE_ , padding='''longest''' ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **SCREAMING_SNAKE_CASE_ , ) UpperCamelCase : List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) for hypothesis in dec: fout.write(hypothesis + '''\n''' ) fout.flush() fout.close() UpperCamelCase : str = int(time.time() - start_time ) # seconds UpperCamelCase : Any = len(SCREAMING_SNAKE_CASE_ ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def a ( ): """simple docstring""" return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' ) def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any]=True ): """simple docstring""" UpperCamelCase : int = argparse.ArgumentParser() parser.add_argument('''model_name''' , type=SCREAMING_SNAKE_CASE_ , help='''like facebook/bart-large-cnn,t5-base, etc.''' ) parser.add_argument('''input_path''' , type=SCREAMING_SNAKE_CASE_ , help='''like cnn_dm/test.source''' ) parser.add_argument('''save_path''' , type=SCREAMING_SNAKE_CASE_ , help='''where to save summaries''' ) parser.add_argument('''--reference_path''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='''like cnn_dm/test.target''' ) parser.add_argument('''--score_path''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , default='''metrics.json''' , help='''where to save metrics''' ) parser.add_argument('''--device''' , type=SCREAMING_SNAKE_CASE_ , 
required=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help='''cuda, cuda:1, cpu etc.''' ) parser.add_argument( '''--prefix''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help='''will be added to the begininng of src examples''' ) parser.add_argument('''--task''' , type=SCREAMING_SNAKE_CASE_ , default='''summarization''' , help='''used for task_specific_params + metrics''' ) parser.add_argument('''--bs''' , type=SCREAMING_SNAKE_CASE_ , default=8 , required=SCREAMING_SNAKE_CASE_ , help='''batch size''' ) parser.add_argument( '''--n_obs''' , type=SCREAMING_SNAKE_CASE_ , default=-1 , required=SCREAMING_SNAKE_CASE_ , help='''How many observations. Defaults to all.''' ) parser.add_argument('''--fp16''' , action='''store_true''' ) parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' ) parser.add_argument( '''--info''' , nargs='''?''' , type=SCREAMING_SNAKE_CASE_ , const=datetime_now() , help=( '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.''' ''' lang=en-ru. If no value is passed, the current datetime string will be used.''' ) , ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate UpperCamelCase , UpperCamelCase : int = parser.parse_known_args() UpperCamelCase : str = parse_numeric_n_bool_cl_kwargs(SCREAMING_SNAKE_CASE_ ) if parsed_args and verbose: print(F"""parsed the following generate kwargs: {parsed_args}""" ) UpperCamelCase : str = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: UpperCamelCase : Tuple = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(F"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError('''Can\'t mix --fp16 and --device cpu''' ) UpperCamelCase : str = generate_summaries_or_translations( SCREAMING_SNAKE_CASE_ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **SCREAMING_SNAKE_CASE_ , ) if args.reference_path is None: return {} # Compute scores UpperCamelCase : Tuple = calculate_bleu if '''translation''' in args.task else calculate_rouge UpperCamelCase : Dict = [x.rstrip() for x in open(args.save_path ).readlines()] UpperCamelCase : Optional[int] = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(SCREAMING_SNAKE_CASE_ )] UpperCamelCase : dict = score_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) scores.update(SCREAMING_SNAKE_CASE_ ) if args.dump_args: scores.update(SCREAMING_SNAKE_CASE_ ) if args.info: UpperCamelCase : Optional[Any] = args.info if verbose: print(SCREAMING_SNAKE_CASE_ ) if args.score_path is not None: json.dump(SCREAMING_SNAKE_CASE_ , open(args.score_path , '''w''' ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
315
1
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset():
    arr = [randint(-1_0_0_0, 1_0_0_0) for i in range(1_0)]
    r = randint(-5_0_0_0, 5_0_0_0)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int):
    # Naive approach: try every ordered triple.
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int):
    # Optimized approach: sort once, then use two pointers.
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times():
    setup_code = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
    test_code1 = '''
triplet_sum1(*dataset)
'''
    test_code2 = '''
triplet_sum2(*dataset)
'''
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=1_0_0_0_0)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=1_0_0_0_0)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f'''The time for naive implementation is {times[0]}.''')
    print(f'''The time for optimized implementation is {times[1]}.''')
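A quick, deterministic agreement check for the two implementations above (illustrative only; the original module benchmarks them on random data instead). list(nums) guards against triplet_sum2's in-place sort:

nums = [1, 2, 3, 4]
assert triplet_sum1(list(nums), 9) == triplet_sum2(list(nums), 9) == (2, 3, 4)
assert triplet_sum1(list(nums), 100) == triplet_sum2(list(nums), 100) == (0, 0, 0)
print("both implementations agree")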
315
import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : int = ["image_processor", "tokenizer"] __UpperCamelCase : List[str] = "AutoImageProcessor" __UpperCamelCase : Optional[Any] = "AutoTokenizer" def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[int] = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __SCREAMING_SNAKE_CASE , ) UpperCamelCase : Any = kwargs.pop('''feature_extractor''' ) UpperCamelCase : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = self.image_processor UpperCamelCase : int = False def __call__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = kwargs.pop('''images''' , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = kwargs.pop('''text''' , __SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) > 0: UpperCamelCase : Union[str, Any] = args[0] UpperCamelCase : str = args[1:] if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: UpperCamelCase : List[str] = self.image_processor(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if text is not None: UpperCamelCase : Optional[Any] = self.tokenizer(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if text is None: return inputs elif images is None: return encodings else: UpperCamelCase : List[str] = encodings['''input_ids'''] return inputs def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): """simple docstring""" return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): """simple docstring""" return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) @contextmanager def _lowercase ( self ): """simple docstring""" warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your images inputs, or in a separate call.''' ) UpperCamelCase : Any = True UpperCamelCase : int = self.tokenizer yield UpperCamelCase : List[Any] = self.image_processor UpperCamelCase : Tuple = False def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None ): """simple docstring""" if added_vocab is None: UpperCamelCase : str = self.tokenizer.get_added_vocab() UpperCamelCase : int = {} while tokens: UpperCamelCase : Dict = re.search(R'''<s_(.*?)>''' , __SCREAMING_SNAKE_CASE , re.IGNORECASE ) if start_token is None: break UpperCamelCase : List[str] = start_token.group(1 ) UpperCamelCase : Dict = re.search(Rf"""</s_{key}>""" , __SCREAMING_SNAKE_CASE , re.IGNORECASE ) UpperCamelCase : Any = start_token.group() if end_token is None: UpperCamelCase : Optional[int] = tokens.replace(__SCREAMING_SNAKE_CASE , '''''' ) else: UpperCamelCase : Dict = end_token.group() UpperCamelCase : int = re.escape(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = re.escape(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , __SCREAMING_SNAKE_CASE , re.IGNORECASE ) if content is not None: UpperCamelCase : Dict = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node UpperCamelCase : Tuple = self.tokenajson(__SCREAMING_SNAKE_CASE , is_inner_value=__SCREAMING_SNAKE_CASE , added_vocab=__SCREAMING_SNAKE_CASE ) if value: if len(__SCREAMING_SNAKE_CASE ) == 1: UpperCamelCase : str = value[0] UpperCamelCase : str = value else: # leaf nodes UpperCamelCase : Optional[int] = [] for leaf in content.split(R'''<sep/>''' ): UpperCamelCase : Optional[int] = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": UpperCamelCase : int = leaf[1:-2] # for categorical special tokens output[key].append(__SCREAMING_SNAKE_CASE ) if len(output[key] ) == 1: UpperCamelCase : Tuple = output[key][0] UpperCamelCase : List[Any] = tokens[tokens.find(__SCREAMING_SNAKE_CASE ) + len(__SCREAMING_SNAKE_CASE ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=__SCREAMING_SNAKE_CASE , added_vocab=__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def _lowercase ( self ): """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __SCREAMING_SNAKE_CASE , ) return self.image_processor_class @property def _lowercase ( self ): """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __SCREAMING_SNAKE_CASE , ) return self.image_processor
315
1
import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup __UpperCAmelCase : int = logging.get_logger(__name__) class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , **__SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(self , ['''bs4'''] ) super().__init__(**__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : List[Any] = [] UpperCamelCase : int = [] UpperCamelCase : List[Any] = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag UpperCamelCase : Tuple = parent.find_all(child.name , recursive=__SCREAMING_SNAKE_CASE ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(__SCREAMING_SNAKE_CASE ) else next(i for i, s in enumerate(__SCREAMING_SNAKE_CASE , 1 ) if s is child ) ) UpperCamelCase : Optional[Any] = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def _lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[Any] = BeautifulSoup(__SCREAMING_SNAKE_CASE , '''html.parser''' ) UpperCamelCase : Union[str, Any] = [] UpperCamelCase : List[str] = [] UpperCamelCase : str = [] for element in html_code.descendants: if type(__SCREAMING_SNAKE_CASE ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue UpperCamelCase : Any = html.unescape(__SCREAMING_SNAKE_CASE ).strip() if not text_in_this_tag: continue all_doc_strings.append(__SCREAMING_SNAKE_CASE ) UpperCamelCase , UpperCamelCase : int = self.xpath_soup(__SCREAMING_SNAKE_CASE ) stringaxtag_seq.append(__SCREAMING_SNAKE_CASE ) stringaxsubs_seq.append(__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Optional[Any] = '''''' for tagname, subs in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): xpath += f"""/{tagname}""" if subs != 0: xpath += f"""[{subs}]""" return xpath def __call__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : int = False # Check that strings has a valid type if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): UpperCamelCase : Optional[Any] = True elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ): if len(__SCREAMING_SNAKE_CASE ) == 0 or isinstance(html_strings[0] , __SCREAMING_SNAKE_CASE ): UpperCamelCase : List[str] = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' f"""but is of type {type(__SCREAMING_SNAKE_CASE )}.""" ) UpperCamelCase : int = bool(isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(html_strings[0] , __SCREAMING_SNAKE_CASE )) ) if not is_batched: UpperCamelCase : Union[str, Any] = [html_strings] # Get nodes + xpaths UpperCamelCase : str = [] UpperCamelCase : int = [] for html_string in html_strings: UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = self.get_three_from_single(__SCREAMING_SNAKE_CASE ) 
nodes.append(__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = [] for node, tag_list, sub_list in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): UpperCamelCase : str = self.construct_xpath(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) xpath_strings.append(__SCREAMING_SNAKE_CASE ) xpaths.append(__SCREAMING_SNAKE_CASE ) # return as Dict UpperCamelCase : List[str] = {'''nodes''': nodes, '''xpaths''': xpaths} UpperCamelCase : List[Any] = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE ) return encoded_inputs
315
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase : Union[str, Any] = { "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"], "processing_mgp_str": ["MgpstrProcessor"], "tokenization_mgp_str": ["MgpstrTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : Union[str, Any] = [ "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST", "MgpstrModel", "MgpstrPreTrainedModel", "MgpstrForSceneTextRecognition", ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys __UpperCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
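The __init__ module above defers heavy imports until first attribute access via transformers' internal _LazyModule. A minimal standard-library-only sketch of the same idea (class and module names here are illustrative, not the transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    # Minimal illustration: resolve attributes from submodules on first access.
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(submodule)
                value = getattr(module, attr)
                setattr(self, attr, value)  # cache so later lookups skip __getattr__
                return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")


# Example: 'sqrt' is only imported from math when first accessed.
lazy = LazyModule("demo", {"math": ["sqrt"]})
print(lazy.sqrt(9.0))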
315
1
import numpy as np

from transformers import Pipeline


def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


# Class name assumed from the transformers custom-pipeline tutorial this file
# follows; the original name was obfuscated in the source.
class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
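A hedged sketch of wiring the pipeline above into transformers' dispatch, following the custom-pipeline pattern from the library documentation; the task name, registry call, and checkpoint are assumptions of that pattern, not part of this file:

from transformers import AutoModelForSequenceClassification, pipeline
from transformers.pipelines import PIPELINE_REGISTRY

PIPELINE_REGISTRY.register_pipeline(
    "pair-classification",
    pipeline_class=PairClassificationPipeline,
    pt_model=AutoModelForSequenceClassification,
)

# Any sequence-classification checkpoint fine-tuned on sentence pairs should work here.
classifier = pipeline("pair-classification", model="sgugger/finetuned-bert-mrpc")
print(classifier("I like pizza.", second_text="Pizza is something I enjoy."))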
315
def solution(length: int = 5_0) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f'''{solution() = }''')
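A worked check for the block-counting recurrence above, using the known small case from Project Euler problem 114 (a row of length 7 admits 17 arrangements, counting the empty one):

assert solution(7) == 17
for n in range(3, 8):
    print(n, solution(n))  # 3 -> 2, 4 -> 4, 5 -> 7, 6 -> 11, 7 -> 17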
315
1
def to_uppercase(word: str) -> str:
    # Convert lowercase ASCII letters to uppercase by shifting their code points by 32.
    # (Function name is assumed; the original name was not recoverable from the source.)
    return "".join(chr(ord(char) - 3_2) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
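A quick usage check for the helper above (to_uppercase is the assumed name, as noted in the code):

print(to_uppercase("hello, World"))  # HELLO, WORLD -- non-lowercase characters pass through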
315
import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , *__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ): """simple docstring""" super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = eval_examples UpperCamelCase : Optional[Any] = post_process_function def _lowercase ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "eval" ): """simple docstring""" UpperCamelCase : int = self.eval_dataset if eval_dataset is None else eval_dataset UpperCamelCase : int = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. UpperCamelCase : Any = self.compute_metrics UpperCamelCase : List[Any] = None UpperCamelCase : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop UpperCamelCase : Dict = time.time() try: UpperCamelCase : str = eval_loop( __SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , ) finally: UpperCamelCase : Union[str, Any] = compute_metrics UpperCamelCase : Any = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default UpperCamelCase : Dict = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions ) UpperCamelCase : Optional[Any] = self.compute_metrics(__SCREAMING_SNAKE_CASE ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): UpperCamelCase : Dict = metrics.pop(__SCREAMING_SNAKE_CASE ) metrics.update(output.metrics ) else: UpperCamelCase : List[Any] = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(__SCREAMING_SNAKE_CASE ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) UpperCamelCase : Any = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE ) return metrics def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "test" ): """simple docstring""" UpperCamelCase : Tuple = self.get_test_dataloader(__SCREAMING_SNAKE_CASE ) # Temporarily disable metric computation, we will do it in the loop here. 
UpperCamelCase : Union[str, Any] = self.compute_metrics UpperCamelCase : Tuple = None UpperCamelCase : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop UpperCamelCase : Optional[int] = time.time() try: UpperCamelCase : int = eval_loop( __SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , ) finally: UpperCamelCase : int = compute_metrics UpperCamelCase : Dict = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output UpperCamelCase : Dict = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions , '''predict''' ) UpperCamelCase : Union[str, Any] = self.compute_metrics(__SCREAMING_SNAKE_CASE ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): UpperCamelCase : Any = metrics.pop(__SCREAMING_SNAKE_CASE ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE )
315
1
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : Optional[Any] = ["image_processor", "tokenizer"] __UpperCamelCase : Any = "ViltImageProcessor" __UpperCamelCase : Any = ("BertTokenizer", "BertTokenizerFast") def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Tuple = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __SCREAMING_SNAKE_CASE , ) UpperCamelCase : List[Any] = kwargs.pop('''feature_extractor''' ) UpperCamelCase : Union[str, Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = self.image_processor def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" UpperCamelCase : Optional[Any] = self.tokenizer( text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_overflowing_tokens=__SCREAMING_SNAKE_CASE , return_special_tokens_mask=__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , return_length=__SCREAMING_SNAKE_CASE , verbose=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) # add pixel_values + pixel_mask UpperCamelCase : Tuple = self.image_processor(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE ) encoding.update(__SCREAMING_SNAKE_CASE ) return encoding def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): """simple docstring""" return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): """simple docstring""" return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) @property def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = self.tokenizer.model_input_names UpperCamelCase : Tuple = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _lowercase ( self ): """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in 
v5. Use `image_processor_class` instead.''' , __SCREAMING_SNAKE_CASE , ) return self.image_processor_class @property def _lowercase ( self ): """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __SCREAMING_SNAKE_CASE , ) return self.image_processor
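A hedged usage sketch for the processor above, assuming network access and the dandelin/vilt-b32-finetuned-vqa checkpoint commonly used with ViLT:

import requests
from PIL import Image

from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
encoding = processor(image, "How many cats are there?", return_tensors="pt")
print(sorted(encoding.keys()))  # input_ids, attention_mask, pixel_values, pixel_mask, ...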
315
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    # The "signature" of a word is its letters in sorted order.
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    # All dictionary words sharing this word's signature are its anagrams.
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
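Because the module above depends on a words.txt file at import time, here is a self-contained sketch of the same signature-grouping idea on an inline word list:

import collections

words = ["pots", "stop", "tops", "spot", "cat", "act"]
groups = collections.defaultdict(list)
for w in words:
    groups["".join(sorted(w))].append(w)
print(dict(groups))
# {'opst': ['pots', 'stop', 'tops', 'spot'], 'act': ['cat', 'act']}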
315
1
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            # Swap out-of-order neighbours.
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
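A quick usage check for the recursive bubble sort above:

data = [5, 1, 4, 2, 8]
print(bubble_sort(list(data)))  # [1, 2, 4, 5, 8]
print(bubble_sort([]))          # [] -- an empty list is returned unchanged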
315
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    # Transpose the rows of source_data into per-column lists of floats.
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(
    data_lists: list[list[float]], weights: list[int]
) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"""Invalid weight of {weight:f} provided"""
            raise ValueError(msg)

        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    # Sum the per-column scores element-wise into one score per row.
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(
    source_data: list[list[float]], weights: list[int]
) -> list[list[float]]:
    # Top-level name is assumed; it was not recoverable from the obfuscated source.
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
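A small worked example for the weighted scoring above (values chosen so the arithmetic is easy to verify by hand):

# Three rows of [price, square_meters]; weight 0 = lower is better, 1 = higher is better.
vehicles = [[20.0, 60.0], [23.0, 90.0], [22.0, 50.0]]
weights = [0, 1]
print(procentual_proximity(vehicles, weights))
# Each row gains a combined score; e.g. the first row scores 1.0 + 0.25 = 1.25.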
315
1
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE=0.01 , __SCREAMING_SNAKE_CASE=1_000 ): """simple docstring""" UpperCamelCase : Dict = p_stop UpperCamelCase : Any = max_length def __iter__( self ): """simple docstring""" UpperCamelCase : str = 0 UpperCamelCase : List[str] = False while not stop and count < self.max_length: yield count count += 1 UpperCamelCase : Optional[Any] = random.random() < self.p_stop class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True ): """simple docstring""" UpperCamelCase : Optional[Any] = [ BatchSamplerShard(__SCREAMING_SNAKE_CASE , 2 , __SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE , even_batches=__SCREAMING_SNAKE_CASE ) for i in range(2 ) ] UpperCamelCase : str = [list(__SCREAMING_SNAKE_CASE ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(__SCREAMING_SNAKE_CASE ) for shard in batch_sampler_shards] , [len(__SCREAMING_SNAKE_CASE ) for e in expected] ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = BatchSampler(range(24 ) , batch_size=3 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=__SCREAMING_SNAKE_CASE ) # Expected shouldn't change self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. UpperCamelCase : Dict = BatchSampler(range(21 ) , batch_size=3 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = BatchSampler(range(21 ) , batch_size=3 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. 
UpperCamelCase : Dict = BatchSampler(range(22 ) , batch_size=3 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = BatchSampler(range(22 ) , batch_size=3 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. UpperCamelCase : str = BatchSampler(range(20 ) , batch_size=3 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : int = BatchSampler(range(20 ) , batch_size=3 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is very small. UpperCamelCase : List[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = [[], []] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE ) # Expected shouldn't change self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is not a round multiple of batch size. UpperCamelCase : Union[str, Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = BatchSampler(range(22 ) , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. 
UpperCamelCase : str = BatchSampler(range(21 ) , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = BatchSampler(range(21 ) , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is very small. UpperCamelCase : Union[str, Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = [[], []] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , even_batches=__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=__SCREAMING_SNAKE_CASE ) # Expected shouldn't change self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , even_batches=__SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. UpperCamelCase : Union[str, Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , even_batches=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , even_batches=__SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. 
UpperCamelCase : int = BatchSampler(range(22 ) , batch_size=3 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , even_batches=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , even_batches=__SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. UpperCamelCase : Any = BatchSampler(range(20 ) , batch_size=3 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , even_batches=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = BatchSampler(range(20 ) , batch_size=3 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , even_batches=__SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is very small. UpperCamelCase : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = [[[0, 1]], []] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , even_batches=__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = [[], []] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , even_batches=__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : int = BatchSampler(range(24 ) , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE , even_batches=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = BatchSampler(range(24 ) , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE ) # Expected shouldn't change self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE , even_batches=__SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is not a round multiple of batch size. 
UpperCamelCase : List[str] = BatchSampler(range(22 ) , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE , even_batches=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = BatchSampler(range(22 ) , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE , even_batches=__SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. UpperCamelCase : str = BatchSampler(range(21 ) , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE , even_batches=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE , even_batches=__SCREAMING_SNAKE_CASE ) # Check the shards when the dataset is very small. UpperCamelCase : List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[str] = [[[0, 1]], []] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE , even_batches=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = [[], []] self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE , even_batches=__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] UpperCamelCase : int = [BatchSamplerShard(__SCREAMING_SNAKE_CASE , 2 , __SCREAMING_SNAKE_CASE , even_batches=__SCREAMING_SNAKE_CASE ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=False ): """simple docstring""" random.seed(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = list(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = [ IterableDatasetShard( __SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , drop_last=__SCREAMING_SNAKE_CASE , num_processes=__SCREAMING_SNAKE_CASE , process_index=__SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE , ) for i in range(__SCREAMING_SNAKE_CASE ) ] UpperCamelCase : Optional[Any] = [] for 
iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(__SCREAMING_SNAKE_CASE ) iterable_dataset_lists.append(list(__SCREAMING_SNAKE_CASE ) ) UpperCamelCase : Optional[int] = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size UpperCamelCase : str = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) ) self.assertTrue(len(__SCREAMING_SNAKE_CASE ) % shard_batch_size == 0 ) UpperCamelCase : int = [] for idx in range(0 , len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(__SCREAMING_SNAKE_CASE ) < len(__SCREAMING_SNAKE_CASE ): reference += reference self.assertListEqual(__SCREAMING_SNAKE_CASE , reference[: len(__SCREAMING_SNAKE_CASE )] ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = 42 UpperCamelCase : Dict = RandomIterableDataset() self.check_iterable_dataset_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE ) self.check_iterable_dataset_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE ) self.check_iterable_dataset_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE ) self.check_iterable_dataset_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE ) # Edge case with a very small dataset UpperCamelCase : Dict = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE ) self.check_iterable_dataset_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE ) self.check_iterable_dataset_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE ) self.check_iterable_dataset_shards(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = BatchSampler(range(16 ) , batch_size=4 , drop_last=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = SkipBatchSampler(__SCREAMING_SNAKE_CASE , 2 ) self.assertListEqual(list(__SCREAMING_SNAKE_CASE ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Tuple = DataLoader(list(range(16 ) ) , batch_size=4 ) UpperCamelCase : Union[str, Any] = skip_first_batches(__SCREAMING_SNAKE_CASE , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def _lowercase ( self 
): """simple docstring""" UpperCamelCase : Optional[Any] = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(__SCREAMING_SNAKE_CASE ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__SCREAMING_SNAKE_CASE ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def _lowercase ( self ): """simple docstring""" Accelerator() UpperCamelCase : int = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(__SCREAMING_SNAKE_CASE ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__SCREAMING_SNAKE_CASE ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
315
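A minimal standalone sketch of what the shard tests above verify: two BatchSamplerShards interleave the batches of one BatchSampler, one shard per process (arguments follow the positional form used in the tests):

from torch.utils.data import BatchSampler

from accelerate.data_loader import BatchSamplerShard

batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
shards = [BatchSamplerShard(batch_sampler, 2, rank) for rank in (0, 1)]
print(list(shards[0]))  # [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]
print(list(shards[1]))  # [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]]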
import glob import os import random from string import ascii_lowercase, digits import cva __UpperCAmelCase : Optional[int] = "" __UpperCAmelCase : Union[str, Any] = "" __UpperCAmelCase : Optional[int] = "" __UpperCAmelCase : Any = 1 # (0 is vertical, 1 is horizontal) def a ( ): """simple docstring""" UpperCamelCase , UpperCamelCase : List[Any] = get_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) print('''Processing...''' ) UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = update_image_and_anno(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for index, image in enumerate(SCREAMING_SNAKE_CASE_ ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' UpperCamelCase : Optional[int] = random_chars(3_2 ) UpperCamelCase : List[Any] = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0] UpperCamelCase : int = F"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}""" cva.imwrite(F"""/{file_root}.jpg""" , SCREAMING_SNAKE_CASE_ , [cva.IMWRITE_JPEG_QUALITY, 8_5] ) print(F"""Success {index+1}/{len(SCREAMING_SNAKE_CASE_ )} with {file_name}""" ) UpperCamelCase : Any = [] for anno in new_annos[index]: UpperCamelCase : Tuple = F"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}""" annos_list.append(SCREAMING_SNAKE_CASE_ ) with open(F"""/{file_root}.txt""" , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ): """simple docstring""" UpperCamelCase : Any = [] UpperCamelCase : Union[str, Any] = [] for label_file in glob.glob(os.path.join(SCREAMING_SNAKE_CASE_ , '''*.txt''' ) ): UpperCamelCase : Optional[Any] = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(SCREAMING_SNAKE_CASE_ ) as in_file: UpperCamelCase : List[str] = in_file.readlines() UpperCamelCase : Tuple = os.path.join(SCREAMING_SNAKE_CASE_ , F"""{label_name}.jpg""" ) UpperCamelCase : Union[str, Any] = [] for obj_list in obj_lists: UpperCamelCase : str = obj_list.rstrip('''\n''' ).split(''' ''' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(SCREAMING_SNAKE_CASE_ ) labels.append(SCREAMING_SNAKE_CASE_ ) return img_paths, labels def a ( SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : int = 1 ): """simple docstring""" UpperCamelCase : List[Any] = [] UpperCamelCase : str = [] UpperCamelCase : int = [] for idx in range(len(SCREAMING_SNAKE_CASE_ ) ): UpperCamelCase : Tuple = [] UpperCamelCase : Optional[int] = img_list[idx] path_list.append(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = anno_list[idx] UpperCamelCase : Optional[Any] = cva.imread(SCREAMING_SNAKE_CASE_ ) if flip_type == 1: UpperCamelCase : Optional[Any] = cva.flip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for bbox in img_annos: UpperCamelCase : Optional[Any] = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: UpperCamelCase : List[str] = cva.flip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for bbox in img_annos: UpperCamelCase : Union[str, Any] = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(SCREAMING_SNAKE_CASE_ ) new_imgs_list.append(SCREAMING_SNAKE_CASE_ ) return new_imgs_list, new_annos_lists, path_list def a ( SCREAMING_SNAKE_CASE_ : int = 3_2 ): """simple docstring""" assert number_char > 1, "The number of character should greater than 1" UpperCamelCase : Any = 
ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
315
1
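The coordinate update at the heart of update_image_and_anno, in isolation: a horizontal flip of a normalized YOLO box mirrors only the x-center, while width and height are unchanged (values below are illustrative):

bbox = [0, 0.25, 0.40, 0.10, 0.20]  # class, x_center, y_center, width, height
flipped = [bbox[0], 1 - bbox[1], bbox[2], bbox[3], bbox[4]]
print(flipped)  # [0, 0.75, 0.4, 0.1, 0.2]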
from typing import Callable, List, Optional, Union import PIL import torch from transformers import ( CLIPImageProcessor, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPTextModel, CLIPTokenizer, ) from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, is_accelerate_available, logging __UpperCAmelCase : int = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ): """simple docstring""" super().__init__() if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1: UpperCamelCase : int = ( f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`""" f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """ '''to update the config accordingly as leaving `steps_offset` might led to incorrect results''' ''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,''' ''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`''' ''' file''' ) deprecate('''steps_offset!=1''' , '''1.0.0''' , __SCREAMING_SNAKE_CASE , standard_warn=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = dict(scheduler.config ) UpperCamelCase : int = 1 UpperCamelCase : Any = FrozenDict(__SCREAMING_SNAKE_CASE ) if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False: UpperCamelCase : Dict = ( f"""The configuration file of this scheduler: {scheduler} has not set the configuration""" ''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make''' ''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to''' ''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face''' ''' Hub, it would be very nice if you could open a Pull request for the''' ''' `scheduler/scheduler_config.json` file''' ) deprecate('''skip_prk_steps not set''' , '''1.0.0''' , __SCREAMING_SNAKE_CASE , standard_warn=__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = dict(scheduler.config ) UpperCamelCase : Optional[int] = True UpperCamelCase : int = FrozenDict(__SCREAMING_SNAKE_CASE ) if safety_checker is None: logger.warning( f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure""" ''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered''' ''' results in services or applications open to the public. Both the diffusers team and Hugging Face''' ''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling''' ''' it only for use-cases that involve analyzing network behavior or auditing its results. 
For more''' ''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' ) self.register_modules( segmentation_model=__SCREAMING_SNAKE_CASE , segmentation_processor=__SCREAMING_SNAKE_CASE , vae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , ) def _lowercase ( self , __SCREAMING_SNAKE_CASE = "auto" ): """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCamelCase : int = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" self.enable_attention_slicing(__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) UpperCamelCase : Optional[Any] = torch.device('''cuda''' ) for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]: if cpu_offloaded_model is not None: cpu_offload(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _lowercase ( self ): """simple docstring""" if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ): return self.device for module in self.unet.modules(): if ( hasattr(__SCREAMING_SNAKE_CASE , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 512 , __SCREAMING_SNAKE_CASE = 512 , __SCREAMING_SNAKE_CASE = 50 , __SCREAMING_SNAKE_CASE = 7.5 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 1 , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" UpperCamelCase : Union[str, Any] = self.segmentation_processor( text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device ) UpperCamelCase : List[str] = self.segmentation_model(**__SCREAMING_SNAKE_CASE ) UpperCamelCase : int = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy() UpperCamelCase : Optional[int] = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )[0].resize(image.size ) # Run inpainting pipeline with the generated mask UpperCamelCase : List[str] = StableDiffusionInpaintPipeline( vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , ) return inpainting_pipeline( prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , mask_image=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , 
num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , )
315
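A hedged sketch of wiring the pipeline above together. The checkpoint ids and the custom_pipeline name are assumptions based on the diffusers community examples, not confirmed by this file:

from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

from diffusers import DiffusionPipeline

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")  # assumed id
segmentation_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",  # assumed id
    custom_pipeline="text_inpainting",       # assumed community pipeline name
    segmentation_model=segmentation_model,
    segmentation_processor=processor,
)

init_image = Image.open("input.jpg").convert("RGB")  # any RGB image
# `text` selects the region to mask via CLIPSeg, `prompt` describes the replacement
result = pipe(prompt="a cup of coffee", image=init_image, text="a glass").images[0]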
import qiskit


def half_adder(bita: int, bitb: int):
    """Build a 2-qubit half adder circuit and return its measurement counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0)
    if bitb == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
315
1
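The full truth table of the circuit above, as a quick check. The circuit is deterministic, so each call's dominant bitstring reads carry then sum:

for bit_a in (0, 1):
    for bit_b in (0, 1):
        print(bit_a, bit_b, half_adder(bit_a, bit_b))
# expected dominant keys: '00', '01', '01', '10'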
import math


def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    """Read the message column by column, stepping through it in strides of key."""
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    """Rebuild the plaintext by filling a num_rows x num_cols grid row by row."""
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
315
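A round-trip sanity check for the two cipher functions above (message and key are illustrative):

message = "Common sense is not so common."
key = 8
assert decrypt_message(key, encrypt_message(key, message)) == message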
import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging __UpperCAmelCase : str = logging.get_logger(__name__) def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] ): """simple docstring""" UpperCamelCase : Union[str, Any] = nn.functional.normalize(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = nn.functional.normalize(SCREAMING_SNAKE_CASE_ ) return torch.mm(SCREAMING_SNAKE_CASE_ , normalized_text_embeds.t() ) class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : List[str] = CLIPConfig __UpperCamelCase : Optional[int] = ["CLIPEncoderLayer"] def __init__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" super().__init__(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = CLIPVisionModel(config.vision_config ) UpperCamelCase : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Dict = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = nn.Parameter(torch.ones(17 ) , requires_grad=__SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = nn.Parameter(torch.ones(3 ) , requires_grad=__SCREAMING_SNAKE_CASE ) @torch.no_grad() def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : Tuple = self.vision_model(__SCREAMING_SNAKE_CASE )[1] # pooled_output UpperCamelCase : Union[str, Any] = self.visual_projection(__SCREAMING_SNAKE_CASE ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCamelCase : Optional[int] = cosine_distance(__SCREAMING_SNAKE_CASE , self.special_care_embeds ).cpu().float().numpy() UpperCamelCase : List[Any] = cosine_distance(__SCREAMING_SNAKE_CASE , self.concept_embeds ).cpu().float().numpy() UpperCamelCase : Dict = [] UpperCamelCase : List[str] = image_embeds.shape[0] for i in range(__SCREAMING_SNAKE_CASE ): UpperCamelCase : Optional[Any] = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images UpperCamelCase : Optional[int] = 0.0 for concept_idx in range(len(special_cos_dist[0] ) ): UpperCamelCase : List[str] = special_cos_dist[i][concept_idx] UpperCamelCase : Optional[Any] = self.special_care_embeds_weights[concept_idx].item() UpperCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} ) UpperCamelCase : Optional[int] = 0.01 for concept_idx in range(len(cos_dist[0] ) ): UpperCamelCase : Optional[int] = cos_dist[i][concept_idx] UpperCamelCase : List[str] = self.concept_embeds_weights[concept_idx].item() UpperCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(__SCREAMING_SNAKE_CASE ) result.append(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Union[str, Any] = [len(res['''bad_concepts'''] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): 
"""simple docstring""" UpperCamelCase : Any = self.vision_model(__SCREAMING_SNAKE_CASE )[1] # pooled_output UpperCamelCase : int = self.visual_projection(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Tuple = cosine_distance(__SCREAMING_SNAKE_CASE , self.special_care_embeds ) UpperCamelCase : str = cosine_distance(__SCREAMING_SNAKE_CASE , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images UpperCamelCase : Union[str, Any] = 0.0 UpperCamelCase : Optional[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) UpperCamelCase : Optional[Any] = torch.any(special_scores > 0 , dim=1 ) UpperCamelCase : int = special_care * 0.01 UpperCamelCase : Tuple = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) UpperCamelCase : Optional[int] = (cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) UpperCamelCase : List[str] = torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
315
1
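A shape check for the cosine similarity helper defined at the top of the snippet above (referred to as cosine_distance throughout the class body); the embedding width 768 is an assumption for illustration:

import torch

image_embeds = torch.randn(2, 768)     # batch of projected image embeddings
concept_embeds = torch.randn(17, 768)  # fixed concept embeddings
print(cosine_distance(image_embeds, concept_embeds).shape)  # torch.Size([2, 17])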
import inspect from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel, VQModel from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" super().__init__() self.register_modules(vqvae=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) @torch.no_grad() def __call__( self , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = 50 , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" UpperCamelCase : Union[str, Any] = randn_tensor( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=__SCREAMING_SNAKE_CASE , ) UpperCamelCase : Any = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler UpperCamelCase : Tuple = latents * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature UpperCamelCase : Tuple = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCamelCase : Union[str, Any] = {} if accepts_eta: UpperCamelCase : Optional[Any] = eta for t in self.progress_bar(self.scheduler.timesteps ): UpperCamelCase : Dict = self.scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # predict the noise residual UpperCamelCase : Any = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).sample # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase : int = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample # decode the image latents with the VAE UpperCamelCase : str = self.vqvae.decode(__SCREAMING_SNAKE_CASE ).sample UpperCamelCase : str = (image / 2 + 0.5).clamp(0 , 1 ) UpperCamelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCamelCase : List[Any] = self.numpy_to_pil(__SCREAMING_SNAKE_CASE ) if not return_dict: return (image,) return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
315
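A hedged end-to-end run of the unconditional pipeline above; the LDMPipeline entry point and the checkpoint id are assumptions based on the diffusers docs:

from diffusers import LDMPipeline

pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")  # assumed checkpoint
image = pipe(num_inference_steps=50).images[0]
image.save("ldm_sample.png")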
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
315
1
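What the entry point above looks like from a shell (a sketch; assumes the diffusers-cli console script is installed):

import subprocess

# `env` is the single subcommand registered above; it prints platform
# and library versions, which is useful when filing bug reports.
subprocess.run(["diffusers-cli", "env"], check=True)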
import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' __UpperCamelCase : Optional[int] = inspect.getfile(accelerate.test_utils) __UpperCamelCase : List[Any] = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"]) __UpperCamelCase : Optional[Any] = ["accelerate", "launch"] __UpperCamelCase : int = Path.home() / ".cache/huggingface/accelerate" __UpperCamelCase : Tuple = "default_config.yaml" __UpperCamelCase : int = config_folder / config_file __UpperCamelCase : Optional[Any] = config_folder / "_default_config.yaml" __UpperCamelCase : Tuple = Path("tests/test_configs") @classmethod def _lowercase ( cls ): """simple docstring""" if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path ) @classmethod def _lowercase ( cls ): """simple docstring""" if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[Any] = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() ) def _lowercase ( self ): """simple docstring""" for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ): with self.subTest(config_file=__SCREAMING_SNAKE_CASE ): execute_subprocess_async( self.base_cmd + ['''--config_file''', str(__SCREAMING_SNAKE_CASE ), self.test_file_path] , env=os.environ.copy() ) def _lowercase ( self ): """simple docstring""" execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() ) class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' __UpperCamelCase : Optional[int] = "test-tpu" __UpperCamelCase : List[str] = "us-central1-a" __UpperCamelCase : Union[str, Any] = "ls" __UpperCamelCase : Tuple = ["accelerate", "tpu-config"] __UpperCamelCase : Tuple = "cd /usr/share" __UpperCamelCase : Tuple = "tests/test_samples/test_command_file.sh" __UpperCamelCase : Optional[Any] = "Running gcloud compute tpus tpu-vm ssh" def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = run_command( self.cmd + ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=__SCREAMING_SNAKE_CASE , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , __SCREAMING_SNAKE_CASE , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/0_12_0.yaml''', '''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug''', ] , return_stdout=__SCREAMING_SNAKE_CASE , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , __SCREAMING_SNAKE_CASE , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=__SCREAMING_SNAKE_CASE ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , __SCREAMING_SNAKE_CASE , ) def _lowercase ( self ): """simple 
docstring""" UpperCamelCase : Dict = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=__SCREAMING_SNAKE_CASE , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , __SCREAMING_SNAKE_CASE , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--command''', '''echo "Hello World"''', '''--debug''', ] , return_stdout=__SCREAMING_SNAKE_CASE , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , __SCREAMING_SNAKE_CASE , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : int = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=__SCREAMING_SNAKE_CASE , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , __SCREAMING_SNAKE_CASE , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/0_12_0.yaml''', '''--command_file''', self.command_file, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug''', ] , return_stdout=__SCREAMING_SNAKE_CASE , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , __SCREAMING_SNAKE_CASE , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=__SCREAMING_SNAKE_CASE , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , __SCREAMING_SNAKE_CASE , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--accelerate_version''', '''12.0.0''', '''--debug''', ] , return_stdout=__SCREAMING_SNAKE_CASE , ) self.assertIn( f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , __SCREAMING_SNAKE_CASE , )
315
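The command shape the tests above assemble, spelled out (a sketch; the flags mirror those exercised in the tests, and with --debug the CLI prints the composed gcloud command instead of executing it):

import subprocess

subprocess.run(
    [
        "accelerate", "tpu-config",
        "--config_file", "tests/test_configs/latest.yaml",
        "--command", "ls",
        "--tpu_zone", "us-central1-a",
        "--tpu_name", "test-tpu",
        "--debug",
    ],
    check=True,
)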
def upper(word: str) -> str:
    """
    Convert the entire string to uppercase letters.

    >>> upper("wow")
    'WOW'
    """
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
315
1
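Two quick checks for upper; non-letters fall through the else branch unchanged:

print(upper("hello world"))        # HELLO WORLD
print(upper("Python 3.11 rocks"))  # PYTHON 3.11 ROCKS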
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    """Find the area of the grid whose rectangle count is closest to target."""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
315
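Why triangle numbers appear in the solution above: an m x n grid contains T(m) * T(n) axis-aligned rectangles, where T(k) = k(k+1)/2. A brute-force cross-check on the 3 x 2 grid from the problem statement:

def count_rectangles(m: int, n: int) -> int:
    # every rectangle is fixed by choosing a width w, a height h, and a placement
    return sum(
        (m - w + 1) * (n - h + 1)
        for w in range(1, m + 1)
        for h in range(1, n + 1)
    )

assert count_rectangles(3, 2) == 18 == (3 * 4 // 2) * (2 * 3 // 2)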
import math


def is_prime(number: int) -> bool:
    """Return True if 'number' is prime, otherwise False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the next prime after value (searching downward if desc=True)."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
315
1
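Quick checks for the helpers above; desc=True makes next_prime search downward:

print([n for n in range(20) if is_prime(n)])  # [2, 3, 5, 7, 11, 13, 17, 19]
print(next_prime(14))                          # 17
print(next_prime(14, desc=True))               # 13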
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=_a) class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True}) __UpperCamelCase : ClassVar[Features] = Features({"text": Value("string")}) __UpperCamelCase : ClassVar[Features] = Features({}) __UpperCamelCase : str = "text" @property def _lowercase ( self ): """simple docstring""" return {self.text_column: "text"}
315
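In the upstream datasets library this frozen dataclass is the LanguageModeling task template; a hedged sketch of its behavior (the class path and the `column_mapping` property name are assumptions inferred from the defaults above):

from datasets.tasks import LanguageModeling

template = LanguageModeling(text_column="content")
print(template.task)            # language-modeling
print(template.column_mapping)  # {'content': 'text'}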
import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor __UpperCAmelCase : Optional[int] = logging.get_logger(__name__) class UpperCAmelCase_ ( _a): '''simple docstring''' def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): """simple docstring""" warnings.warn( '''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use ImageGPTImageProcessor instead.''' , __SCREAMING_SNAKE_CASE , ) super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
315
1
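The shim above only warns and forwards to the new class, so the deprecated name remains a drop-in subclass (a quick structural check):

from transformers import ImageGPTFeatureExtractor, ImageGPTImageProcessor

# the deprecated alias is literally a subclass of the replacement
assert issubclass(ImageGPTFeatureExtractor, ImageGPTImageProcessor)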
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase : Optional[int] = logging.get_logger(__name__) __UpperCAmelCase : Union[str, Any] = { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json", } class UpperCAmelCase_ ( _a): '''simple docstring''' __UpperCamelCase : int = "xlnet" __UpperCamelCase : int = ["mems"] __UpperCamelCase : int = { "n_token": "vocab_size", # Backward compatibility "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , __SCREAMING_SNAKE_CASE=32_000 , __SCREAMING_SNAKE_CASE=1_024 , __SCREAMING_SNAKE_CASE=24 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=4_096 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="bi" , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=-1 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="last" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="tanh" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=2 , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" UpperCamelCase : Tuple = vocab_size UpperCamelCase : Dict = d_model UpperCamelCase : List[Any] = n_layer UpperCamelCase : Optional[int] = n_head if d_model % n_head != 0: raise ValueError(f"""'d_model % n_head' ({d_model % n_head}) should be equal to 0""" ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( f"""`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})""" ) UpperCamelCase : Any = d_model // n_head UpperCamelCase : Optional[Any] = ff_activation UpperCamelCase : Any = d_inner UpperCamelCase : Union[str, Any] = untie_r UpperCamelCase : Any = attn_type UpperCamelCase : Dict = initializer_range UpperCamelCase : List[Any] = layer_norm_eps UpperCamelCase : Optional[int] = dropout UpperCamelCase : Any = mem_len UpperCamelCase : Union[str, Any] = reuse_len UpperCamelCase : Any = bi_data UpperCamelCase : Optional[int] = clamp_len UpperCamelCase : List[Any] = same_length UpperCamelCase : Optional[Any] = summary_type UpperCamelCase : int = summary_use_proj UpperCamelCase : Tuple = summary_activation UpperCamelCase : Dict = summary_last_dropout UpperCamelCase : Any = start_n_top UpperCamelCase : int = end_n_top UpperCamelCase : Optional[Any] = bos_token_id UpperCamelCase : int = pad_token_id UpperCamelCase : Any = eos_token_id if "use_cache" in kwargs: warnings.warn( '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`''' ''' instead.''' , __SCREAMING_SNAKE_CASE , ) UpperCamelCase : Any = kwargs['''use_cache'''] UpperCamelCase : Optional[Any] = use_mems_eval UpperCamelCase : str = use_mems_train super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) @property def _lowercase ( self ): """simple docstring""" logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) return -1 @max_position_embeddings.setter def 
_lowercase ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" raise NotImplementedError( f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
315
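A minimal exercise of the config above: d_head is derived, and a d_model that is not divisible by n_head raises the ValueError from the constructor (the class name XLNetConfig is inferred from model_type = "xlnet"):

from transformers import XLNetConfig

config = XLNetConfig(d_model=1024, n_head=16)
print(config.d_head)  # 64 == d_model // n_head

try:
    XLNetConfig(d_model=1000, n_head=16)  # 1000 % 16 != 0
except ValueError as err:
    print(err)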
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=18 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , ): """simple docstring""" UpperCamelCase : List[str] = size if size is not None else {'''height''': 18, '''width''': 18} UpperCamelCase : int = parent UpperCamelCase : List[Any] = batch_size UpperCamelCase : Optional[int] = num_channels UpperCamelCase : Union[str, Any] = image_size UpperCamelCase : Union[str, Any] = min_resolution UpperCamelCase : Tuple = max_resolution UpperCamelCase : List[str] = do_resize UpperCamelCase : List[str] = size UpperCamelCase : int = apply_ocr def _lowercase ( self ): """simple docstring""" return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class UpperCAmelCase_ ( _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[Any] = LayoutLMvaImageProcessingTester(self ) @property def _lowercase ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''apply_ocr''' ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} ) UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) def _lowercase ( self ): """simple docstring""" pass def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input UpperCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) self.assertIsInstance(encoding.words , __SCREAMING_SNAKE_CASE ) self.assertIsInstance(encoding.boxes , __SCREAMING_SNAKE_CASE ) # Test batched UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values 
self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input UpperCamelCase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched UpperCamelCase : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input UpperCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched UpperCamelCase : Optional[int] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : List[str] = LayoutLMvaImageProcessor() from datasets import load_dataset UpperCamelCase : Dict = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' ) UpperCamelCase : List[Any] = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) UpperCamelCase : int = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 UpperCamelCase : Union[str, Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', 
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
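# Usage note (a sketch, not part of the original test file): assuming this module
# lives at tests/models/layoutlmv3/ inside a transformers checkout, so that the
# relative ImageProcessingSavingTestMixin import resolves, the suite can be run with:
#   python -m pytest tests/models/layoutlmv3/test_image_processing_layoutlmv3.py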
315
1