import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])

    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])

    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])

    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """
    Copy/paste/tweak the original model's weights to our SwiftFormer structure.
    """

    # define default SwiftFormer configuration
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]

    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]

    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]

    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
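
# Illustrative usage sketch (not part of the original script): loading the checkpoint
# converted above for inference. The folder path mirrors the script's default
# --pytorch_dump_folder_path, and the 224x224 ViT-style preprocessing is an assumption
# for illustration, not something the script verifies.
def load_converted_model_example(pytorch_dump_folder_path="./converted_outputs/"):
    model = SwiftFormerForImageClassification.from_pretrained(pytorch_dump_folder_path)
    processor = ViTImageProcessor(size={"height": 224, "width": 224})  # assumed preprocessing
    inputs = processor(images=prepare_img(), return_tensors="pt")
    predicted_class_idx = model(**inputs).logits.argmax(-1).item()
    return model.config.id2label[predicted_class_idx]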
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
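
# Illustrative usage sketch (not part of the original module): driving the processor in
# its default OCR mode, where __call__ above takes words/boxes from the image processor
# and feeds them to the tokenizer. The checkpoint name and image path are assumptions
# for illustration only.
def _layoutxlm_processor_example():
    from PIL import Image  # local import to keep the sketch self-contained

    from transformers import LayoutXLMProcessor

    processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")  # assumed checkpoint
    image = Image.open("document.png").convert("RGB")  # hypothetical input image
    # with apply_ocr=True (the default) the words and boxes come from the OCR pass;
    # passing boxes explicitly here would raise the ValueError shown in __call__
    encoding = processor(image, return_tensors="pt")
    return encoding["input_ids"], encoding["bbox"], encoding["image"]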
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
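
# Illustrative sketch (not part of the original module): instantiating the config and
# reading the backbone fields derived in __init__ above. Values shown rely on the
# defaults; this is an assumption-labeled illustration, not an official example.
def _focalnet_config_example():
    config = FocalNetConfig(out_features=["stage1", "stage2"])
    # stage_names is derived from len(depths); out_features/out_indices are aligned by
    # get_aligned_output_features_output_indices in __init__
    return config.stage_names, config.out_features, config.out_indices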
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        BioGptForCausalLM,
        BioGptForSequenceClassification,
        BioGptForTokenClassification,
        BioGptModel,
        BioGptTokenizer,
    )
    from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=True,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)

        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
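
# Illustrative sketch (not part of the original test file): the left-padding setup that
# test_batch_generation exercises above is the standard recipe for batched generation
# with a decoder-only model. Assumes the public microsoft/biogpt checkpoint.
def _biogpt_batched_generation_sketch():
    from transformers import BioGptForCausalLM, BioGptTokenizer

    tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
    tokenizer.padding_side = "left"  # pad on the left so generation continues from real tokens
    tokenizer.pad_token = tokenizer.eos_token
    model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
    model.config.pad_token_id = model.config.eos_token_id
    inputs = tokenizer(["Hello, my dog is a little", "Today, I"], return_tensors="pt", padding=True)
    outputs = model.generate(**inputs, max_new_tokens=20)
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)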
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
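
# Illustrative sketch (not part of the original module): running the preprocess pipeline
# above end to end. With the defaults, resize maps a shortest_edge of 224 to
# int((256 / 224) * 224) = 256 before the 224x224 center crop. The class name here
# follows the restoration above and the input image is synthetic.
def _image_processor_example():
    image_processor = LevitImageProcessor()  # defaults: shortest_edge 224, crop 224x224
    dummy_image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # hypothetical HWC image
    batch = image_processor.preprocess(dummy_image, return_tensors="np")
    return batch["pixel_values"].shape  # expected (1, 3, 224, 224)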
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
UpperCAmelCase = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
__snake_case = ""
__snake_case = ""
__snake_case = ""
__snake_case = ""
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_a = tweepy.OAuthHandler(_lowerCAmelCase, _lowerCAmelCase )
auth.set_access_token(_lowerCAmelCase, _lowerCAmelCase )
_a = tweepy.API(_lowerCAmelCase )
# initialize a list to hold all the tweepy Tweets
_a = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_a = api.user_timeline(screen_name=_lowerCAmelCase, count=2_00 )
# save most recent tweets
alltweets.extend(_lowerCAmelCase )
# save the id of the oldest tweet less one
_a = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(_lowerCAmelCase ) > 0:
print(f'getting tweets before {oldest}' )
# all subsequent requests use the max_id param to prevent duplicates
_a = api.user_timeline(
screen_name=_lowerCAmelCase, count=2_00, max_id=_lowerCAmelCase )
# save most recent tweets
alltweets.extend(_lowerCAmelCase )
# update the id of the oldest tweet less one
_a = alltweets[-1].id - 1
print(f'...{len(_lowerCAmelCase )} tweets downloaded so far' )
# transform the tweepy tweets into a 2D array that will populate the csv
_a = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'new_{screen_name}_tweets.csv', '''w''' ) as f:
_a = csv.writer(_lowerCAmelCase )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(_lowerCAmelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
    import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]",
            "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
                self.assertIsInstance(retriever, RagRetriever)
                hidden_states = np.array(
                    [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
                )
                out = retriever.retrieve(hidden_states, n_docs=1)
                self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs

        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
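
# Illustrative sketch (not part of the original test file): pairing a RagRetriever with
# a RAG model outside of tests. This follows the documented public setup around the
# facebook/rag-token-nq checkpoint; shown as an assumption-labeled illustration only.
def _rag_retriever_usage_sketch():
    from transformers import RagRetriever, RagTokenForGeneration, RagTokenizer

    tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
    retriever = RagRetriever.from_pretrained(
        "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
    )
    model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)
    inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
    generated = model.generate(input_ids=inputs["input_ids"])
    return tokenizer.batch_decode(generated, skip_special_tokens=True)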
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __A ( UpperCamelCase__ ):
a__ : Optional[int] = (UnCLIPScheduler,)
def _lowercase (self : str , **__a : Optional[Any] ):
UpperCAmelCase_ = {
"num_train_timesteps": 1000,
"variance_type": "fixed_small_log",
"clip_sample": True,
"clip_sample_range": 1.0,
"prediction_type": "epsilon",
}
config.update(**__a )
return config
def _lowercase (self : Any ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__a )
def _lowercase (self : Union[str, Any] ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__a )
def _lowercase (self : Union[str, Any] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__a )
def _lowercase (self : List[Any] ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__a )
def _lowercase (self : Union[str, Any] ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__a )
def _lowercase (self : Optional[Any] ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__a , prev_timestep=__a )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(variance_type="fixed_small_log" )
UpperCAmelCase_ = scheduler_class(**__a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1E-5
def _lowercase (self : List[str] ):
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(variance_type="learned_range" )
UpperCAmelCase_ = scheduler_class(**__a )
UpperCAmelCase_ = 0.5
        assert abs(scheduler._get_variance(1 , predicted_variance=__a ) - -10.1712790 ) < 1E-5
        assert abs(scheduler._get_variance(487 , predicted_variance=__a ) - -5.7998052 ) < 1E-5
        assert abs(scheduler._get_variance(999 , predicted_variance=__a ) - -0.0010011 ) < 1E-5
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**__a )
UpperCAmelCase_ = scheduler.timesteps
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
UpperCAmelCase_ = torch.manual_seed(0 )
for i, t in enumerate(__a ):
# 1. predict noise residual
UpperCAmelCase_ = model(__a , __a )
# 2. predict previous mean of sample x_t-1
UpperCAmelCase_ = scheduler.step(__a , __a , __a , generator=__a ).prev_sample
UpperCAmelCase_ = pred_prev_sample
UpperCAmelCase_ = torch.sum(torch.abs(__a ) )
UpperCAmelCase_ = torch.mean(torch.abs(__a ) )
        assert abs(result_sum.item() - 252.2682495 ) < 1E-2
        assert abs(result_mean.item() - 0.3284743 ) < 1E-3
def _lowercase (self : Tuple ):
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**__a )
scheduler.set_timesteps(25 )
UpperCAmelCase_ = scheduler.timesteps
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
UpperCAmelCase_ = torch.manual_seed(0 )
for i, t in enumerate(__a ):
# 1. predict noise residual
UpperCAmelCase_ = model(__a , __a )
if i + 1 == timesteps.shape[0]:
UpperCAmelCase_ = None
else:
UpperCAmelCase_ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
UpperCAmelCase_ = scheduler.step(
__a , __a , __a , prev_timestep=__a , generator=__a ).prev_sample
UpperCAmelCase_ = pred_prev_sample
UpperCAmelCase_ = torch.sum(torch.abs(__a ) )
UpperCAmelCase_ = torch.mean(torch.abs(__a ) )
        assert abs(result_sum.item() - 258.2044983 ) < 1E-2
        assert abs(result_mean.item() - 0.3362038 ) < 1E-3
def _lowercase (self : List[str] ):
pass
def _lowercase (self : Any ):
pass
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class A__ :
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.get_dummy_input()
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def _lowerCamelCase ( self : Optional[int] , a : List[Any]=True , a : Any=False , a : Dict=False , a : Union[str, Any]=False , ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 4
lowerCAmelCase__ : int = 32
lowerCAmelCase__ : Tuple = (32, 32)
lowerCAmelCase__ : List[Any] = torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = torch.device(a )
lowerCAmelCase__ : str = (batch_size, num_channels) + sizes
lowerCAmelCase__ : Tuple = randn_tensor(a , generator=a , device=a )
lowerCAmelCase__ : Optional[Any] = {'hidden_states': hidden_states}
if include_temb:
lowerCAmelCase__ : int = 128
lowerCAmelCase__ : List[str] = randn_tensor((batch_size, temb_channels) , generator=a , device=a )
if include_res_hidden_states_tuple:
lowerCAmelCase__ : int = torch.manual_seed(1 )
lowerCAmelCase__ : str = (randn_tensor(a , generator=a , device=a ),)
if include_encoder_hidden_states:
lowerCAmelCase__ : Any = floats_tensor((batch_size, 32, 32) ).to(a )
if include_skip_sample:
lowerCAmelCase__ : Union[str, Any] = randn_tensor(((batch_size, 3) + sizes) , generator=a , device=a )
return dummy_input
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
lowerCAmelCase__ : Union[str, Any] = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
lowerCAmelCase__ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self : str , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.prepare_init_args_and_inputs_for_common()
lowerCAmelCase__ : int = self.block_class(**a )
unet_block.to(a )
unet_block.eval()
with torch.no_grad():
lowerCAmelCase__ : int = unet_block(**a )
if isinstance(a , a ):
lowerCAmelCase__ : List[str] = output[0]
self.assertEqual(output.shape , self.output_shape )
lowerCAmelCase__ : List[str] = output[0, -1, -3:, -3:]
lowerCAmelCase__ : Any = torch.tensor(a ).to(a )
assert torch_all_close(output_slice.flatten() , a , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.prepare_init_args_and_inputs_for_common()
lowerCAmelCase__ : Any = self.block_class(**a )
model.to(a )
model.train()
lowerCAmelCase__ : int = model(**a )
if isinstance(a , a ):
lowerCAmelCase__ : Dict = output[0]
lowerCAmelCase__ : Optional[int] = torch.device(a )
lowerCAmelCase__ : List[Any] = randn_tensor(output.shape , device=a )
lowerCAmelCase__ : List[Any] = torch.nn.functional.mse_loss(a , a )
loss.backward()
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'dandelin/vilt-b32-finetuned-vqa'
lowercase__ = (
'This is a tool that answers a question about an image. It takes an input named `image` which should be the '
'image containing the information, as well as a `question` which should be the question in English. It '
'returns a text that is the answer to the question.'
)
lowercase__ = 'image_qa'
lowercase__ = AutoProcessor
lowercase__ = AutoModelForVisualQuestionAnswering
lowercase__ = ['image', 'text']
lowercase__ = ['text']
def __init__( self , *__a , **__a) -> int:
'''simple docstring'''
requires_backends(self , ['''vision'''])
super().__init__(*__a , **__a)
def UpperCAmelCase ( self , __a , __a) -> Dict:
'''simple docstring'''
return self.pre_processor(__a , __a , return_tensors='''pt''')
def UpperCAmelCase ( self , __a) -> Tuple:
'''simple docstring'''
with torch.no_grad():
return self.model(**__a).logits
def UpperCAmelCase ( self , __a) -> Optional[Any]:
'''simple docstring'''
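        # ViLT treats VQA as classification over a fixed answer vocabulary: take the
        # highest-scoring answer id and map it back to its label string.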
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
"""simple docstring"""
class Things:
    def __init__(self, name, value, weight) -> None:
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
snake_case_ = """\
"""
snake_case_ = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
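Concretely, for a tokenized sequence X = (x_1, ..., x_t),
PPL(X) = exp(-(1 / t) * sum_i log p(x_i | x_<i)).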
"""
snake_case_ = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self :str ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , )
def UpperCAmelCase__ ( self :str , lowercase_ :Optional[int] , lowercase_ :Optional[Any] , lowercase_ :int = 16 , lowercase_ :bool = True , lowercase_ :Any=None ) -> Optional[int]:
if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either cpu, cuda or gpu."
if device == "gpu":
UpperCAmelCase = 'cuda'
else:
UpperCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
UpperCAmelCase = AutoModelForCausalLM.from_pretrained(lowercase_ )
UpperCAmelCase = model.to(lowercase_ )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
UpperCAmelCase = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(lowercase_ ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
UpperCAmelCase = model.config.max_length - 1
else:
UpperCAmelCase = model.config.max_length
UpperCAmelCase = tokenizer(
lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , return_tensors='pt' , return_attention_mask=lowercase_ , ).to(lowercase_ )
UpperCAmelCase = encodings['input_ids']
UpperCAmelCase = encodings['attention_mask']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
UpperCAmelCase = []
UpperCAmelCase = CrossEntropyLoss(reduction='none' )
for start_index in logging.tqdm(range(0 , len(lowercase_ ) , lowercase_ ) ):
UpperCAmelCase = min(start_index + batch_size , len(lowercase_ ) )
UpperCAmelCase = encoded_texts[start_index:end_index]
UpperCAmelCase = attn_masks[start_index:end_index]
if add_start_token:
UpperCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(lowercase_ )
UpperCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
UpperCAmelCase = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(lowercase_ ), attn_mask] , dim=1 )
UpperCAmelCase = encoded_batch
with torch.no_grad():
UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ ).logits
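            # Shift so that tokens < n predict token n: logits at position i are
            # scored against the label at position i + 1, with padding masked out.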
UpperCAmelCase = out_logits[..., :-1, :].contiguous()
UpperCAmelCase = labels[..., 1:].contiguous()
UpperCAmelCase = attn_mask[..., 1:].contiguous()
            UpperCAmelCase = torch.exp2(
(loss_fct(shift_logits.transpose(1 , 2 ) , lowercase_ ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(lowercase_ )}
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase :Union[str, Any] = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :str = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Optional[int] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Union[str, Any] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    '''simple docstring'''
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash: drop the
        # leading character's contribution (ord * alphabet_size**(p_len - 1)),
        # shift the window, and append the next character.
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    '''simple docstring'''
    pattern = 'abc1abc12'
    text1 = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text2 = 'alskfjaldsk23adsfabcabc'
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = 'Lü'
    text = 'Lüsai'
    assert rabin_karp(pattern, text)
    pattern = 'Lue'
    assert not rabin_karp(pattern, text)
    print('Success.')
if __name__ == "__main__":
test_rabin_karp()
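# On a hash match the window is re-checked with a direct string comparison
# (text[i : i + p_len] == pattern), so hash collisions cannot yield false positives.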
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_UpperCamelCase: List[str] = logging.get_logger(__name__)
_UpperCamelCase: List[str] = {'tokenizer_file': 'tokenizer.json'}
_UpperCamelCase: str = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class a__ ( SCREAMING_SNAKE_CASE__ ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = ['input_ids', 'attention_mask']
_lowerCamelCase = None
def __init__( self : Tuple, lowerCAmelCase : Tuple=None, lowerCAmelCase : Optional[Any]=None, lowerCAmelCase : str=None, lowerCAmelCase : Union[str, Any]="<unk>", lowerCAmelCase : Any="<s>", lowerCAmelCase : str="</s>", lowerCAmelCase : Tuple="<pad>", lowerCAmelCase : Dict=False, lowerCAmelCase : Union[str, Any]=False, **lowerCAmelCase : Optional[Any], ) -> str:
super().__init__(
lowerCAmelCase, lowerCAmelCase, tokenizer_file=lowerCAmelCase, unk_token=lowerCAmelCase, bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, pad_token=lowerCAmelCase, add_prefix_space=lowerCAmelCase, clean_up_tokenization_spaces=lowerCAmelCase, **lowerCAmelCase, )
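        # Keep the backend pre-tokenizer in sync with `add_prefix_space`: if the
        # serialized state disagrees with the requested flag, rebuild it below.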
lowercase : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', lowerCAmelCase ) != add_prefix_space:
lowercase : Dict = getattr(lowerCAmelCase, pre_tok_state.pop('type' ) )
lowercase : Optional[Any] = add_prefix_space
lowercase : List[str] = pre_tok_class(**lowerCAmelCase )
lowercase : List[str] = add_prefix_space
def lowercase ( self : Dict, *lowerCAmelCase : Tuple, **lowerCAmelCase : List[Any] ) -> BatchEncoding:
lowercase : str = kwargs.get('is_split_into_words', lowerCAmelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
' pretokenized inputs.' )
return super()._batch_encode_plus(*lowerCAmelCase, **lowerCAmelCase )
def lowercase ( self : List[Any], *lowerCAmelCase : Dict, **lowerCAmelCase : Dict ) -> BatchEncoding:
lowercase : List[str] = kwargs.get('is_split_into_words', lowerCAmelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
' pretokenized inputs.' )
return super()._encode_plus(*lowerCAmelCase, **lowerCAmelCase )
def lowercase ( self : Optional[int], lowerCAmelCase : str, lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
lowercase : Optional[Any] = self._tokenizer.model.save(lowerCAmelCase, name=lowerCAmelCase )
return tuple(lowerCAmelCase )
def lowercase ( self : Tuple, lowerCAmelCase : "Conversation" ) -> List[int]:
lowercase : Dict = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCAmelCase, add_special_tokens=lowerCAmelCase ) + [self.eos_token_id] )
if len(lowerCAmelCase ) > self.model_max_length:
lowercase : Optional[Any] = input_ids[-self.model_max_length :]
return input_ids
import argparse
import datetime
def zeller(date_input: str) -> str:
    # Days of the week for the response
    days = {
        '0': 'Sunday',
        '1': 'Monday',
        '2': 'Tuesday',
        '3': 'Wednesday',
        '4': 'Thursday',
        '5': 'Friday',
        '6': 'Saturday',
    }
    # Maps datetime.date.weekday() (Monday = 0) onto this script's Sunday = 0 indexing
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError('Must be 10 characters long')
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError('Month must be between 1 - 12')
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError('Date must be between 1 - 31')
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8_500:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?')
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.')
    # Response
    response = f'Your date {date_input}, is a {days[str(f)]}!'
return response
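# e.g. zeller('01-31-2010') -> 'Your date 01-31-2010, is a Sunday!'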
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : List[str] = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
    args = parser.parse_args()
zeller(args.date_input)
from math import ceil
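# Each ring i of the spiral (i >= 1) has four diagonal corners that sum to
# 4 * (2 * i + 1) ** 2 - 12 * i, i.e. 4 * odd**2 - 6 * even below; the centre
# square contributes the initial 1.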
def solution(n: int = 1_001) -> int:
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
__lowercase : Optional[Any] = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
__lowercase : List[str] = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
__lowercase : List[Any] = '\nCalculates how good predictions are given some references, using certain scores\nArgs:\n    predictions: list of candidates to evaluate. Each candidate should be a list\n        of strings with several code candidates to solve the problem.\n    references: a list with a test for each prediction. Each test should evaluate the\n        correctness of a code candidate.\n    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n    num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n    timeout:\nReturns:\n    pass_at_k: dict with pass rates for each k\n    results: dict with granular results of each unittest\nExamples:\n    >>> code_eval = datasets.load_metric("code_eval")\n    >>> test_cases = ["assert add(2,3)==5"]\n    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n    >>> print(pass_at_k)\n    {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
__lowercase : int = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
__lowercase : Union[str, Any] = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
def __UpperCAmelCase ( self , __a , __a , __a=[1, 10, 100] , __a=4 , __a=3.0 ):
'''simple docstring'''
if os.getenv('HF_ALLOW_CODE_EVAL' , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError('This metric is currently not supported on Windows.' )
with ThreadPoolExecutor(max_workers=__a ) as executor:
__a : Tuple = []
__a : Optional[int] = Counter()
__a : Optional[int] = 0
__a : Any = defaultdict(__a )
for task_id, (candidates, test_case) in enumerate(zip(__a , __a ) ):
for candidate in candidates:
__a : Optional[int] = candidate + '\n' + test_case
__a : Any = (test_program, timeout, task_id, completion_id[task_id])
__a : Any = executor.submit(__a , *__a )
futures.append(__a )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(__a ):
__a : Optional[Any] = future.result()
results[result["task_id"]].append((result['completion_id'], result) )
__a , __a : List[str] = [], []
for result in results.values():
result.sort()
__a : Optional[Any] = [r[1]['passed'] for r in result]
total.append(len(__a ) )
correct.append(sum(__a ) )
__a : Tuple = np.array(__a )
__a : Optional[Any] = np.array(__a )
__a : List[Any] = k
__a : Optional[Any] = {f"""pass@{k}""": estimate_pass_at_k(__a , __a , __a ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Tuple ):
def estimator(_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ) -> float:
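        # Unbiased pass@k estimator from "Evaluating Large Language Models Trained
        # on Code": 1 - C(n - c, k) / C(n, k), computed as a product for numerical stability.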
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Optional[int] = itertools.repeat(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) )
else:
assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE )
__a : Any = iter(_SCREAMING_SNAKE_CASE )
return np.array([estimator(int(_SCREAMING_SNAKE_CASE ) , int(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) for n, c in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] )
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=a__ ):
lowerCamelCase : Tuple =["""speech"""]
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
requires_backends(self , ["speech"] )
class __UpperCamelCase ( metaclass=a__ ):
lowerCamelCase : Optional[Any] =["""speech"""]
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
requires_backends(self , ["speech"] )
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : str = """naver-clova-ix/donut-base-finetuned-docvqa"""
UpperCAmelCase : Tuple = (
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
UpperCAmelCase : List[str] = """document_qa"""
UpperCAmelCase : str = AutoProcessor
UpperCAmelCase : Optional[int] = VisionEncoderDecoderModel
UpperCAmelCase : int = ["""image""", """text"""]
UpperCAmelCase : int = ["""text"""]
def __init__( self : Tuple , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any):
if not is_vision_available():
raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase)
def __snake_case ( self : Tuple , __UpperCAmelCase : "Image" , __UpperCAmelCase : str):
a : Any = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
a : Union[str, Any] = task_prompt.replace("{user_input}" , __UpperCAmelCase)
a : Optional[Any] = self.pre_processor.tokenizer(
__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors="pt").input_ids
a : Any = self.pre_processor(__UpperCAmelCase , return_tensors="pt").pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def __snake_case ( self : int , __UpperCAmelCase : int):
return self.model.generate(
inputs["pixel_values"].to(self.device) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__UpperCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__UpperCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__UpperCAmelCase , ).sequences
def __snake_case ( self : str , __UpperCAmelCase : List[Any]):
a : Union[str, Any] = self.pre_processor.batch_decode(__UpperCAmelCase)[0]
a : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , "")
a : Any = sequence.replace(self.pre_processor.tokenizer.pad_token , "")
a : Optional[Any] = re.sub(r"<.*?>" , "" , __UpperCAmelCase , count=1).strip() # remove first task start token
        a : List[str] = self.pre_processor.token2json(__UpperCAmelCase)
return sequence["answer"]
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
__UpperCamelCase = '''sshleifer/bart-tiny-random'''
__UpperCamelCase = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __A ( self ) -> List[str]:
return AutoConfig.from_pretrained(lowerCAmelCase__ )
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE = create_student_by_copying_alternating_layers(lowerCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def __A ( self ) -> Dict:
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE = create_student_by_copying_alternating_layers(lowerCAmelCase__ , tempfile.mkdtemp() , e=1 , d=lowerCAmelCase__ )
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE = create_student_by_copying_alternating_layers(lowerCAmelCase__ , tempfile.mkdtemp() , e=1 , d=lowerCAmelCase__ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def __A ( self ) -> Tuple:
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE = create_student_by_copying_alternating_layers(lowerCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def __A ( self ) -> Union[str, Any]:
with self.assertRaises(lowerCAmelCase__ ):
create_student_by_copying_alternating_layers(lowerCAmelCase__ , tempfile.mkdtemp() , e=lowerCAmelCase__ , d=lowerCAmelCase__ )
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = field(default="""audio-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
SCREAMING_SNAKE_CASE_ : ClassVar[Features] = Features({"""audio""": Audio()} )
SCREAMING_SNAKE_CASE_ : ClassVar[Features] = Features({"""labels""": ClassLabel} )
SCREAMING_SNAKE_CASE_ : str = "audio"
SCREAMING_SNAKE_CASE_ : str = "labels"
def __A ( self , lowerCAmelCase__ ) -> Any:
if self.label_column not in features:
raise ValueError(F'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , lowerCAmelCase__ ):
raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
SCREAMING_SNAKE_CASE = copy.deepcopy(self )
SCREAMING_SNAKE_CASE = self.label_schema.copy()
SCREAMING_SNAKE_CASE = features[self.label_column]
SCREAMING_SNAKE_CASE = label_schema
return task_template
@property
def __A ( self ) -> Dict[str, str]:
return {
self.audio_column: "audio",
self.label_column: "labels",
}
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern: str) -> list[int]:
    # failure[i] is the length of the longest proper prefix of pattern[: i + 1]
    # that is also a suffix of it.
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
# Test 1)
    pattern = 'abc1abc12'
    text1 = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text2 = 'alskfjaldsk23adsfabcabc'
    assert kmp(pattern, text1) and not kmp(pattern, text2)
    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert kmp(pattern, text)
    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert kmp(pattern, text)
    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert kmp(pattern, text)
    # Test 5)
    pattern = 'aabaabaaa'
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class _UpperCamelCase ( datasets.BeamBasedBuilder ):
def lowercase ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
return datasets.DatasetInfo(
features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=_SCREAMING_SNAKE_CASE , )
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Tuple ) -> Dict:
"""simple docstring"""
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Dict ) -> int:
"""simple docstring"""
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_SCREAMING_SNAKE_CASE )
class _UpperCamelCase ( datasets.BeamBasedBuilder ):
def lowercase ( self: Union[str, Any] ) -> Dict:
"""simple docstring"""
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=_SCREAMING_SNAKE_CASE , )
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Tuple ) -> Any:
"""simple docstring"""
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
]
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: List[str] ) -> Union[str, Any]:
"""simple docstring"""
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( ) -> str:
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def lowerCAmelCase_ ( ) -> Optional[Any]:
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class _UpperCamelCase ( lowerCAmelCase_ ):
@require_beam
def lowercase ( self: str ) -> int:
"""simple docstring"""
UpperCamelCase_ = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCamelCase_ = DummyBeamDataset(cache_dir=_SCREAMING_SNAKE_CASE , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_SCREAMING_SNAKE_CASE , builder.name , "default" , "0.0.0" , f'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
UpperCamelCase_ = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , _SCREAMING_SNAKE_CASE )
self.assertEqual(dset["train"].info.splits["train"].num_examples , _SCREAMING_SNAKE_CASE )
self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def lowercase ( self: Dict ) -> Union[str, Any]:
"""simple docstring"""
import apache_beam as beam
UpperCamelCase_ = beam.io.parquetio.WriteToParquet
UpperCamelCase_ = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCamelCase_ = DummyBeamDataset(cache_dir=_SCREAMING_SNAKE_CASE , beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
UpperCamelCase_ = partial(_SCREAMING_SNAKE_CASE , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
_SCREAMING_SNAKE_CASE , builder.name , "default" , "0.0.0" , f'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
                            _SCREAMING_SNAKE_CASE , builder.name , "default" , "0.0.0" , f'''{builder.name}-train-00001-of-00002.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
UpperCamelCase_ = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , _SCREAMING_SNAKE_CASE )
self.assertEqual(dset["train"].info.splits["train"].num_examples , _SCREAMING_SNAKE_CASE )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def lowercase ( self: Any ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCamelCase_ = DummyBeamDataset(cache_dir=_SCREAMING_SNAKE_CASE )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def lowercase ( self: int ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCamelCase_ = NestedBeamDataset(cache_dir=_SCREAMING_SNAKE_CASE , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_SCREAMING_SNAKE_CASE , builder.name , "default" , "0.0.0" , f'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
UpperCamelCase_ = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , _SCREAMING_SNAKE_CASE )
self.assertEqual(dset["train"].info.splits["train"].num_examples , _SCREAMING_SNAKE_CASE )
self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
def hamming(n_element: int) -> list:
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError('n_element should be a positive number')
        raise my_error
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list
if __name__ == "__main__":
    n = input('Enter the last number (nth term) of the Hamming Number Series: ')
    print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    hamming_numbers = hamming(int(n))
print('-----------------------------------------------------')
print(f'''The list with nth numbers is: {hamming_numbers}''')
print('-----------------------------------------------------')
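# The first few Hamming (5-smooth) numbers: 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...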
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class __A :
_UpperCamelCase : Dict = PegasusConfig
_UpperCamelCase : List[Any] = {}
_UpperCamelCase : Any = "gelu"
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=False , a__=99 , a__=32 , a__=5 , a__=4 , a__=37 , a__=0.1 , a__=0.1 , a__=20 , a__=2 , a__=1 , a__=0 , ):
_lowerCAmelCase : List[str] = parent
_lowerCAmelCase : Optional[int] = batch_size
_lowerCAmelCase : str = seq_length
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : Optional[Any] = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : List[str] = hidden_dropout_prob
_lowerCAmelCase : int = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = max_position_embeddings
_lowerCAmelCase : List[str] = eos_token_id
_lowerCAmelCase : List[str] = pad_token_id
_lowerCAmelCase : Tuple = bos_token_id
def __A ( self ):
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
_lowerCAmelCase : List[Any] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
_lowerCAmelCase : Dict = np.concatenate([input_ids, eos_tensor] , axis=1 )
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_lowerCAmelCase : List[Any] = prepare_pegasus_inputs_dict(a__ , a__ , a__ )
return config, inputs_dict
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : str = 20
_lowerCAmelCase : Tuple = model_class_name(a__ )
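        # Decode once token-by-token through the `past_key_values` cache and once
        # over the full sequence; the final-step logits of both passes must agree.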
_lowerCAmelCase : Optional[Any] = model.encode(inputs_dict["""input_ids"""] )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_lowerCAmelCase : Tuple = model.init_cache(decoder_input_ids.shape[0] , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
_lowerCAmelCase : Any = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowerCAmelCase : Tuple = model.decode(
decoder_input_ids[:, :-1] , a__ , decoder_attention_mask=a__ , past_key_values=a__ , decoder_position_ids=a__ , )
_lowerCAmelCase : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_lowerCAmelCase : List[Any] = model.decode(
decoder_input_ids[:, -1:] , a__ , decoder_attention_mask=a__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=a__ , )
_lowerCAmelCase : Optional[int] = model.decode(a__ , a__ )
_lowerCAmelCase : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}" )
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1)
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
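# Standalone sketch (illustration only, not part of the original test module) of the
# masking rule used by prepare_pegasus_inputs_dict above: pad positions are zeroed in
# the encoder mask, while the decoder mask always keeps the first position.
def _demo_pegasus_masks():  # hypothetical helper name
    ids = np.array([[5, 6, 0], [7, 0, 0]])
    attention_mask = np.not_equal(ids, 0).astype(np.int8)  # pad_token_id assumed to be 0
    assert attention_mask.tolist() == [[1, 1, 0], [1, 0, 0]]
    decoder_attention_mask = np.concatenate(
        [np.ones(ids[:, :1].shape, dtype=np.int8), np.not_equal(ids[:, 1:], 0).astype(np.int8)], axis=-1
    )
    assert decoder_attention_mask.tolist() == [[1, 1, 0], [1, 0, 0]]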
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
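    # To run just these tests from a transformers source checkout (path assumed):
    #     pytest tests/models/pegasus/test_modeling_flax_pegasus.py -q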
| 44
|
"""simple docstring"""
def solution(n=1_000):
"""simple docstring"""
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 153
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class DistilBertConfig(PretrainedConfig):
"""simple docstring"""
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }
    def __init__(
        self,
        vocab_size=3_0522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
"""simple docstring"""
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
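# A minimal usage sketch of the config above (assumes the `transformers` package is
# installed; mirrors the released DistilBERT config API):
#     from transformers import DistilBertConfig
#     config = DistilBertConfig(n_layers=3, n_heads=4, dim=128, hidden_dim=512)
#     assert config.hidden_size == config.dim  # aliased through attribute_map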
| 360
|
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval", **gen_kwargs) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader, description="Evaluation", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test", **gen_kwargs):
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader, description="Prediction", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
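# Hypothetical wiring sketch for the trainer above (every name on the right-hand side
# is a placeholder, not something defined in this file):
#     trainer = QuestionAnsweringSeq2SeqTrainer(
#         model=model,
#         args=training_args,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         post_process_function=post_processing_function,
#         compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate(max_length=64, num_beams=4)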
| 19
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
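# Example invocation of the script above (file paths are hypothetical placeholders):
#     python convert_bert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./bert_model.ckpt \
#         --bert_config_file ./bert_config.json \
#         --pytorch_dump_path ./pytorch_model.bin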
| 318
|
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/led-base-16384""": 1_6_3_8_4,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from utf-8 byte values to printable unicode strings."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
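# Quick self-checks for the two helpers above (illustration only; uses nothing beyond
# this module). bytes_to_unicode is a bijection over all 256 byte values, and
# get_pairs returns the adjacent-symbol pairs of a word.
def _demo_bpe_helpers():  # hypothetical helper name
    mapping = bytes_to_unicode()
    assert len(mapping) == 256 and len(set(mapping.values())) == 256
    assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}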
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
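# A minimal usage sketch for the tokenizer above (downloads a checkpoint, so it is
# illustrative rather than a unit test; the checkpoint name is the one referenced in
# the URL maps at the top of this file):
#     tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#     enc = tok(["short text", "a much longer input sentence"], padding=True)
#     # A `global_attention_mask` key, if present, is padded with -1 by `_pad` above.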
| 343
| 0
|
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
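# Example invocation (flag names follow the benchmark argument dataclass; the model
# name is a placeholder):
#     python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128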
| 294
|
'''simple docstring'''
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform a depth-first based topological sort starting from `start`."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
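    # For the sample graph above this prints ['c', 'd', 'e', 'b', 'a']; the list is a
    # post-order, so reversing it gives a conventional topological order ('a' first).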
| 294
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__UpperCamelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["pixel_values"]
def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = PILImageResampling.BICUBIC, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = True, lowerCAmelCase__ = 1 / 255, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = True, **lowerCAmelCase__, ) -> None:
super().__init__(**lowerCAmelCase__)
snake_case_ = size if size is not None else {'shortest_edge': 224}
snake_case_ = get_size_dict(lowerCAmelCase__, default_to_square=lowerCAmelCase__)
snake_case_ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
snake_case_ = get_size_dict(lowerCAmelCase__, default_to_square=lowerCAmelCase__, param_name='crop_size')
snake_case_ = do_resize
snake_case_ = size
snake_case_ = resample
snake_case_ = do_center_crop
snake_case_ = crop_size
snake_case_ = do_rescale
snake_case_ = rescale_factor
snake_case_ = do_normalize
snake_case_ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
snake_case_ = image_std if image_std is not None else OPENAI_CLIP_STD
snake_case_ = do_convert_rgb
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = PILImageResampling.BICUBIC, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray:
snake_case_ = get_size_dict(lowerCAmelCase__, default_to_square=lowerCAmelCase__)
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
snake_case_ = get_resize_output_image_size(lowerCAmelCase__, size=size['shortest_edge'], default_to_square=lowerCAmelCase__)
return resize(lowerCAmelCase__, size=lowerCAmelCase__, resample=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray:
snake_case_ = get_size_dict(lowerCAmelCase__)
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}')
return center_crop(lowerCAmelCase__, size=(size['height'], size['width']), data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> Optional[int]:
return rescale(lowerCAmelCase__, scale=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray:
return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> PIL.Image.Image:
snake_case_ = do_resize if do_resize is not None else self.do_resize
snake_case_ = size if size is not None else self.size
snake_case_ = get_size_dict(lowerCAmelCase__, param_name='size', default_to_square=lowerCAmelCase__)
snake_case_ = resample if resample is not None else self.resample
snake_case_ = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ = crop_size if crop_size is not None else self.crop_size
snake_case_ = get_size_dict(lowerCAmelCase__, param_name='crop_size', default_to_square=lowerCAmelCase__)
snake_case_ = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = image_mean if image_mean is not None else self.image_mean
snake_case_ = image_std if image_std is not None else self.image_std
snake_case_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case_ = make_list_of_images(lowerCAmelCase__)
if not valid_images(lowerCAmelCase__):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case_ = [convert_to_rgb(lowerCAmelCase__) for image in images]
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images]
if do_resize:
snake_case_ = [self.resize(image=lowerCAmelCase__, size=lowerCAmelCase__, resample=lowerCAmelCase__) for image in images]
if do_center_crop:
snake_case_ = [self.center_crop(image=lowerCAmelCase__, size=lowerCAmelCase__) for image in images]
if do_rescale:
snake_case_ = [self.rescale(image=lowerCAmelCase__, scale=lowerCAmelCase__) for image in images]
if do_normalize:
snake_case_ = [self.normalize(image=lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__) for image in images]
snake_case_ = [to_channel_dimension_format(lowerCAmelCase__, lowerCAmelCase__) for image in images]
snake_case_ = {'pixel_values': images}
return BatchFeature(data=lowerCAmelCase__, tensor_type=lowerCAmelCase__)
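# Hypothetical usage sketch for the image processor above (the exported class name is
# assumed; any 8-bit HxWxC numpy array can stand in for a PIL image):
#     import numpy as np
#     processor = CLIPImageProcessor()
#     batch = processor(images=np.zeros((480, 640, 3), dtype=np.uint8), return_tensors="np")
#     batch["pixel_values"].shape  # (1, 3, 224, 224) with the defaults above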
| 69
|
"""simple docstring"""
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
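    # Sanity check of the recurrence as reconstructed above: the perimeters generated
    # up to 100 are 16 and 50, so solution(100) == 66.
    assert solution(100) == 66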
| 100
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowercase : Any = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = ['''pixel_values''']
def __init__( self , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = 1 / 2_55 , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
lowercase_ : int = size if size is not None else {'''shortest_edge''': 2_56}
lowercase_ : int = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
lowercase_ : int = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
lowercase_ : Any = get_size_dict(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = do_resize
lowercase_ : Optional[int] = size
lowercase_ : int = resample
lowercase_ : Optional[int] = do_center_crop
lowercase_ : str = crop_size
lowercase_ : Dict = do_rescale
lowercase_ : int = rescale_factor
lowercase_ : Tuple = do_normalize
lowercase_ : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Union[str, Any] = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowercase_ : int = get_resize_output_image_size(__SCREAMING_SNAKE_CASE , size=size['''shortest_edge'''] , default_to_square=__SCREAMING_SNAKE_CASE )
return resize(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : List[str] = get_size_dict(__SCREAMING_SNAKE_CASE )
return center_crop(__SCREAMING_SNAKE_CASE , size=(size['''height'''], size['''width''']) , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return rescale(__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : int = do_resize if do_resize is not None else self.do_resize
lowercase_ : List[str] = size if size is not None else self.size
lowercase_ : Any = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = resample if resample is not None else self.resample
lowercase_ : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase_ : Optional[Any] = crop_size if crop_size is not None else self.crop_size
lowercase_ : List[Any] = get_size_dict(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ : str = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ : Tuple = image_mean if image_mean is not None else self.image_mean
lowercase_ : int = image_std if image_std is not None else self.image_std
lowercase_ : Tuple = make_list_of_images(__SCREAMING_SNAKE_CASE )
if not valid_images(__SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase_ : List[Any] = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
lowercase_ : List[str] = [self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
lowercase_ : Optional[int] = [self.center_crop(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
lowercase_ : List[str] = [self.rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
lowercase_ : List[Any] = [self.normalize(image=__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE ) for image in images]
lowercase_ : Optional[int] = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images]
lowercase_ : str = {'''pixel_values''': images}
return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
| 264
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowercase : Any = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
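# Migration sketch: new code should construct the replacement class directly, e.g.
#     from transformers import DonutImageProcessor
#     processor = DonutImageProcessor()  # same behavior, no deprecation warning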
| 264
| 1
|
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr: list[int] | None = None, size: int | None = None):
        """Create a Fenwick (binary indexed) tree from an array or with a given size."""
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]):
        """Initialize the tree in O(n) from an existing array."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        """Recover the underlying array in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int):
        return index + (index & (-index))

    @staticmethod
    def prev(index: int):
        return index - (index & (-index))

    def add(self, index: int, value: int):
        """Add `value` at `index` in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int):
        """Set the value at `index` to `value` in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int):
        """Sum of the half-open prefix [0, right) in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int):
        """Sum over the half-open range [left, right)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int):
        """Value stored at `index`."""
        return self.query(index, index + 1)

    def rank_query(self, value: int):
        """Find an index by cumulative value (rank/select style query)."""
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
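    # Small worked example of the structure above: point updates plus prefix sums.
    tree = FenwickTree([1, 2, 3, 4])
    assert tree.prefix(3) == 1 + 2 + 3
    tree.add(1, 10)  # logical array becomes [1, 12, 3, 4]
    assert tree.query(1, 4) == 12 + 3 + 4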
| 51
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
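# Example invocation (the script name and paths are hypothetical placeholders):
#     python convert_longformer_qa_checkpoint.py \
#         --longformer_model longformer-base-4096 \
#         --longformer_question_answering_ckpt_path ./checkpoint.ckpt \
#         --pytorch_dump_folder_path ./longformer_qa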
| 228
| 0
|
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """Yield the Fibonacci numbers 1, 2, 3, 5, 8, ..."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with `n` digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
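# Known small case for the function above: the first Fibonacci number with three
# digits is F(12) = 144, so solution(3) == 12.
assert solution(3) == 12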
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 174
|
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion (spectrogram-based audio generation)."""

    _optional_components = ["vqvae"]

    def __init__(self, vqvae: AutoencoderKL, unet: UNet2DConditionModel, mel: Mel, scheduler: Union[DDIMScheduler, DDPMScheduler]):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Default number of inference steps: 50 for DDIM, 1000 for DDPM."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
def __call__( self : str , __UpperCAmelCase : int = 1 , __UpperCAmelCase : str = None , __UpperCAmelCase : np.ndarray = None , __UpperCAmelCase : int = 0 , __UpperCAmelCase : int = 0 , __UpperCAmelCase : int = None , __UpperCAmelCase : torch.Generator = None , __UpperCAmelCase : float = 0 , __UpperCAmelCase : float = 0 , __UpperCAmelCase : torch.Generator = None , __UpperCAmelCase : float = 0 , __UpperCAmelCase : torch.Tensor = None , __UpperCAmelCase : torch.Tensor = None , __UpperCAmelCase : Dict=True , ):
'''simple docstring'''
_A = steps or self.get_default_steps()
self.scheduler.set_timesteps(__UpperCAmelCase )
_A = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_A = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_A = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__UpperCAmelCase , device=self.device , )
_A = noise
_A = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__UpperCAmelCase , __UpperCAmelCase )
_A = self.mel.audio_slice_to_image(__UpperCAmelCase )
_A = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
_A = (input_image / 255) * 2 - 1
_A = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_A = self.vqvae.encode(torch.unsqueeze(__UpperCAmelCase , 0 ) ).latent_dist.sample(
generator=__UpperCAmelCase )[0]
_A = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_A = self.scheduler.add_noise(__UpperCAmelCase , __UpperCAmelCase , self.scheduler.timesteps[start_step - 1] )
_A = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_A = int(mask_start_secs * pixels_per_second )
_A = int(mask_end_secs * pixels_per_second )
_A = self.scheduler.add_noise(__UpperCAmelCase , __UpperCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __UpperCAmelCase ):
_A = self.unet(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )["sample"]
else:
_A = self.unet(__UpperCAmelCase , __UpperCAmelCase )["sample"]
if isinstance(self.scheduler , __UpperCAmelCase ):
_A = self.scheduler.step(
model_output=__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , )["prev_sample"]
else:
_A = self.scheduler.step(
model_output=__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , generator=__UpperCAmelCase , )["prev_sample"]
if mask is not None:
if mask_start > 0:
_A = mask[:, step, :, :mask_start]
if mask_end > 0:
_A = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_A = 1 / self.vqvae.config.scaling_factor * images
_A = self.vqvae.decode(__UpperCAmelCase )["sample"]
_A = (images / 2 + 0.5).clamp(0 , 1 )
_A = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_A = (images * 255).round().astype("uint8" )
_A = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(__UpperCAmelCase , mode="RGB" ).convert("L" ) for _ in images) )
_A = [self.mel.image_to_audio(__UpperCAmelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__UpperCAmelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__UpperCAmelCase ) )
@torch.no_grad()
def lowerCAmelCase ( self : str , __UpperCAmelCase : List[Image.Image] , __UpperCAmelCase : int = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler , __UpperCAmelCase )
self.scheduler.set_timesteps(__UpperCAmelCase )
_A = np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
_A = (sample / 255) * 2 - 1
_A = torch.Tensor(__UpperCAmelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_A = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_A = self.scheduler.alphas_cumprod[t]
_A = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_A = 1 - alpha_prod_t
_A = self.unet(__UpperCAmelCase , __UpperCAmelCase )["sample"]
_A = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_A = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_A = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    '''Spherical linear interpolation between two flattened tensors.'''
    theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
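# A minimal usage sketch (the latent tensors are hypothetical; slerp follows the
# great circle between the two inputs, which preserves magnitude better than a
# plain linear mix when interpolating Gaussian noise):
#   halfway = pipeline.slerp(noise_a, noise_b, alpha=0.5)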
| 174
| 1
|
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    """Convert an original CompVis latent-diffusion checkpoint into a diffusers LDMPipeline."""
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)
    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)
    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps, beta_schedule="scaled_linear", beta_start=config.model.params.linear_start, beta_end=config.model.params.linear_end, clip_sample=False, )
    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
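# Illustrative invocation (the .ckpt/.yaml paths below are placeholders, not files
# shipped with the script):
#   python convert_ldm_original.py --checkpoint_path model.ckpt \
#       --config_path config.yaml --output_path ./ldm_pipeline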
| 337
|
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
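# Notes on the class above: floyd_warshall() runs in O(n^3) time and O(n^2)
# space. As written, dp[i][i] is never initialised to 0, so show_min(i, i)
# returns the cheapest nonempty cycle through i (math.inf if no cycle exists).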
| 337
| 1
|
"""simple docstring"""
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
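# fire.Fire(minify) exposes the function as a command-line tool; a typical call
# (directory names are illustrative) would be:
#   python minify.py ./data ./data_mini 100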
| 370
|
"""simple docstring"""
def bfs(graph, s, t, parent):
    """Return True when a path from source s to sink t exists in the residual graph."""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from source to sink using BFS augmenting paths."""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # update residual capacities along the augmenting path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
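# The capacity matrix above is the classic CLRS flow-network example, for which
# the program prints 23. Using BFS to pick augmenting paths (the Edmonds-Karp
# variant) bounds the running time at O(V * E^2).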
| 259
| 0
|
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Original GELU computed exactly via the Gaussian error function."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Smoother tanh-based GELU approximation."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044_715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044_715, x.dtype)
    coeff2 = tf.cast(0.7_978_845_608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU with outputs clipped to [-10, 10], useful for quantization."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split the given axis in two and gate one half with the other."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
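# Lookup sketch: map a config string to its activation function, e.g.
#   act_fn = get_tf_activation("gelu_new")
#   y = act_fn(tf.constant([-1.0, 0.0, 1.0]))
# An unknown name raises KeyError listing the valid keys.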
| 136
|
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset
@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
_UpperCAmelCase = """\
Text data.
Second line of data."""
@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename


@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path


@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
    with open(filename, "w") as f:
        f.write(data)
    return filename
DATA = [
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
DATA2 = [
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
DATA_DICT_OF_LISTS = {
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
DATA_STR = [
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path
@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path
@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path
@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def abc_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path
@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path
@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path
@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
return data_dir
| 173
| 0
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        '''Check that the PyTorch mt5-small port reproduces the expected log-likelihood score.'''
        model = AutoModelForSeq2SeqLM.from_pretrained('google/mt5-small', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')

        input_ids = tokenizer('Hello there', return_tensors='pt').input_ids
        labels = tokenizer('Hi I am', return_tensors='pt').input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1E-4)
| 358
|
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'num_attention_heads'))
class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # Levit applies four stride-2 convolutions in its patch embedding
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]) , )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='Levit does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='Levit does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='Levit does not output attentions')
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ] , )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
            {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
            {'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ):
                    config.problem_type = problem_type['title']
                    config.num_labels = problem_type['num_labels']
                    model = model_class(config)
                    model.to(torch_device)
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
                    if problem_type["num_labels"] > 1:
                        inputs['labels'] = inputs['labels'].unsqueeze(1).repeat(1, problem_type['num_labels'])
                    inputs['labels'] = inputs['labels'].to(problem_type['dtype'])
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                F'Something is going wrong in the regression problem: intercepted {w.message}' )
                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 263
| 0
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        '''Create a random PIL image for the processor tests.'''
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]
        self.assertListEqual(decode_strs, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = None
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
| 31
|
'''simple docstring'''
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely sequence of hidden states for the given observations."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = F"""{var_name} must be a list"""
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = F"""{var_name} must be a list of strings"""
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = F"""{var_name} must be a dict"""
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = F"""{var_name} all keys must be strings"""
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = F"""{var_name} {nested_text}all values must be {value_type.__name__}"""
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
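# Illustrative call with the classic two-state health/weather HMM (the textbook
# Viterbi example, shown here only as a usage sketch):
#   observations = ["normal", "cold", "dizzy"]
#   states = ["Healthy", "Fever"]
#   start_p = {"Healthy": 0.6, "Fever": 0.4}
#   trans_p = {"Healthy": {"Healthy": 0.7, "Fever": 0.3},
#              "Fever": {"Healthy": 0.4, "Fever": 0.6}}
#   emit_p = {"Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#             "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
#   viterbi(observations, states, start_p, trans_p, emit_p)
#   # -> ['Healthy', 'Healthy', 'Fever']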
| 31
| 1
|
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''', CASES)
def test_import_parsing(tmp_path, case):
    '''Every snippet should yield exactly ["os"]: only unconditional imports count.'''
    tmp_file_path = os.path.join(tmp_path, '''test_file.py''')
    with open(tmp_file_path, '''w''') as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
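# All parametrised cases reduce to the same expectation because get_imports is
# used to resolve the pip dependencies of dynamically loaded modules: imports
# guarded by try/except are treated as optional fallbacks and must not be
# reported as hard requirements.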
| 105
|
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"""Vertex\tShortest Distance from vertex {src}""")
    for i, d in enumerate(distance):
        print(f"""{i}\t\t{d}""")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
        if distance[u] != float('''inf''') and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float('''inf''')] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
            if distance[u] != float('''inf''') and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception('''Negative cycle found''')

    return distance
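# Relaxing every edge vertex_count - 1 times gives O(V * E) time; if a further
# relaxation afterwards still improves some distance, the graph must contain a
# negative-weight cycle, which is what check_negative_cycle detects.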
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
"""simple docstring"""
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that current and next vertices are connected
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # Initialize path with -1, indicating that we have not visited them yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
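# Usage sketch: the 4-cycle graph 0-1-2-3-0 admits the Hamiltonian cycle
# [0, 1, 2, 3, 0]; a graph missing one of those edges returns []:
#
#     adjacency = [
#         [0, 1, 0, 1],
#         [1, 0, 1, 0],
#         [0, 1, 0, 1],
#         [1, 0, 1, 0],
#     ]
#     hamilton_cycle(adjacency)  # -> [0, 1, 2, 3, 0]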
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:], feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", )
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , num_labels=self.num_labels , )
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
    def test_model_common_attributes(self):
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
    def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
    def test_save_load_fast_init_to_base(self):
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
    def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def test_model_is_small(self):
pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, config=config, model_class=model_class) if False else self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@unittest.skip(reason="""UperNet does not have tied weights""" )
    def test_tied_model_weights_key_ignore(self):
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
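# Return codes above: 1 -> the graph has an Euler circuit (no odd-degree
# vertices), 2 -> it has an Euler path (exactly two odd-degree vertices; one of
# them is returned as the suggested start node), 3 -> it has neither.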
def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
lowercase__ = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])} )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
lowercase__ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
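# Example invocation (a sketch -- the script name and all paths below are
# placeholders, not values taken from this file):
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf \
#       --not_finetuned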
'''simple docstring'''
from jiwer import compute_measures
import datasets
lowercase__ = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
lowercase__ = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
lowercase__ = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
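# Worked example of the arithmetic above, using the pairs from the docstring:
# "this is the prediction" vs. reference "this is the reference" has 1
# substitution and 3 hits (incorrect=1, total=4); "there is an other sample"
# vs. "there is another one" has 2 substitutions, 1 insertion and 2 hits
# (incorrect=3, total=4); so the aggregate WER is (1 + 3) / (4 + 4) = 0.5.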
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCAmelCase__ : List[str] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio"""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
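# For instance, with the default sample_rate of 16_000 Hz and max_length=20.0
# seconds, any clip longer than 320_000 samples is cropped at a random offset,
# while shorter clips are returned unchanged.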
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"},
    )
    eval_split_name: str = field(
        default="validation",
        metadata={"help": "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"},
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label",
        metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={"help": "For debugging purposes or quicker training, truncate the number of training examples to this value if set."},
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set."},
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={"help": "Will use the token generated when running `huggingface-cli login` (necessary to use this script with private models)."},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch." )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, use_auth_token=True if model_args.use_auth_token else None, )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, use_auth_token=True if model_args.use_auth_token else None, )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}." )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}." )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path, return_attention_mask=model_args.attention_mask, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate) )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="audio-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=raw_datasets["train"] if training_args.do_train else None, eval_dataset=raw_datasets["eval"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=feature_extractor, )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
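# Example launch (a sketch -- the dataset and model names below are
# illustrative, not taken from this file):
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir ./wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval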
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
lowerCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval", **gen_kwargs):
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader, description='Evaluation', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ))

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader, description='Prediction', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ))

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, 'predict')
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("""nltk"""))
if NLTK_VERSION >= version.Version("""3.6.4"""):
from nltk import word_tokenize
_CITATION = """\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
"""
_DESCRIPTION = """\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
"""
_KWARGS_DESCRIPTION = """
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
'meteor': meteor score.
Examples:
>>> meteor = datasets.load_metric('meteor')
>>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]
>>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results[\"meteor\"], 4))
0.6944
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
    def _download_and_prepare(self, dl_manager):
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version('3.6.5'):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
    def test_added_tokens_do_lower_case(self):
pass
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
def _lowerCamelCase ( self: Dict ) -> List[Any]:
__UpperCAmelCase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : Union[str, Any] = self.get_input_output_texts(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowerCamelCase )
__UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Any = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertNotEqual(len(__lowerCamelCase ) , 0 )
__UpperCAmelCase : int = tokenizer.decode(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(text_a.replace(" " , "" ) , __lowerCamelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _lowerCamelCase ( self: Tuple ) -> List[str]:
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _lowerCamelCase ( self: List[Any] ) -> List[str]:
pass
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """An adapter that assists with logging in a multiprocess setup."""

    @staticmethod
    def _should_log(main_process_only):
        # log from every process unless the call is restricted to the main one
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                # let each process log one after the other, in rank order
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
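# A minimal usage sketch (an assumption for illustration, not part of the
# original module): the adapter logs only from the main process by default,
# while `main_process_only=False` logs from every process.
#
#   from accelerate import Accelerator
#   accelerator = Accelerator()  # initializes the shared PartialState
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("logged once, from the main process")
#   logger.info("logged from every process", main_process_only=False)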
'''simple docstring'''
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            # Laplace expansion along the first row
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        # transpose of the cofactor matrix
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(value) for value in self.rows[0]) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
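# A short usage sketch (illustrative, not part of the original module): basic
# arithmetic and the determinant on a 2x2 matrix.
def _matrix_demo() -> None:
    m = Matrix([[1, 2], [3, 4]])
    print(m.determinant())           # -2
    print((m + m).rows)              # [[2, 4], [6, 8]]
    print((m * m.identity()) == m)   # True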
'''simple docstring'''
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    # read the message column by column: column `col` collects every
    # `key`-th character starting at index `col`
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
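# A worked example (illustrative, not part of the original script): with key 8,
# the 30-character message below is read out column by column.
#
#   >>> encrypt_message(8, "Common sense is not so common.")
#   'Cenoonommstmme oo snnio. s s c'
#   >>> decrypt_message(8, "Cenoonommstmme oo snnio. s s c")
#   'Common sense is not so common.'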
class Graph:
    def __init__(self) -> None:
        # dictionary mapping each vertex to its list of adjacent vertices
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if the vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark the current vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
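# An equivalent iterative traversal (a sketch added for illustration, not part
# of the original file): the same adjacency dictionary walked with an explicit
# stack instead of recursion.
def dfs_iterative(graph: Graph, start_vertex: int) -> list:
    visited, stack = [], [start_vertex]
    while stack:
        vertex = stack.pop()
        if vertex not in visited:
            visited.append(vertex)
            # push neighbours so they are explored depth-first
            stack.extend(reversed(graph.vertex.get(vertex, [])))
    return visited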
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
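# A minimal usage sketch (illustrative, not part of the original test file):
# with the tiny vocabulary written in setUp, WordPiece splits words into
# "##"-prefixed subwords, mirroring test_full_tokenizer above.
#
#   tokenizer = BertTokenizer(vocab_file)  # vocab_file as written in setUp
#   tokenizer.tokenize("unwanted running")
#   # -> ['un', '##want', '##ed', 'runn', '##ing']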
import sys
import turtle
def get_mid(pa, pb):
    """Return the midpoint of two points."""
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2


def triangle(vertexa, vertexb, vertexc, depth):
    # draw the current triangle
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])

    if depth == 0:
        return

    # recursively draw the three corner sub-triangles
    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexa, vertexb), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexb), get_mid(vertexa, vertexc), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez-orangesum-title': (
            'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
        ),
    },
    'tokenizer_file': {
        'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
        'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
        'moussaKam/barthez-orangesum-title': (
            'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'moussaKam/mbarthez': 1_024,
    'moussaKam/barthez': 1_024,
    'moussaKam/barthez-orangesum-title': 1_024,
}

SPIECE_UNDERLINE = '▁'
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    """Fast (Rust-backed) tokenizer for the BARThez model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
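# A minimal usage sketch (illustrative, not part of the original module):
# BARThez wraps a single sequence as `<s> A </s>` and a pair as
# `<s> A </s></s> B </s>`, which is what build_inputs_with_special_tokens encodes.
#
#   tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#   tokenizer.build_inputs_with_special_tokens([8, 9])        # [cls, 8, 9, sep]
#   tokenizer.build_inputs_with_special_tokens([8, 9], [10])  # [cls, 8, 9, sep, sep, 10, sep]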
"""simple docstring"""
_snake_case : Optional[int] = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_snake_case : List[str] = [{"type": "code", "content": INSTALL_CONTENT}]
_snake_case : str = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )

        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )

        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device

        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
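# A minimal inference sketch (illustrative, not part of the original tests):
# the pretrained checkpoint used by the integration test returns both a mel
# spectrogram image and the decoded audio.
#
#   pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
#   output = pipe(generator=torch.Generator().manual_seed(42))
#   mel_image, audio = output.images[0], output.audios[0]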
"""simple docstring"""
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # gradient of the normal at the point of incidence
    normal_gradient = point_y / 4 / point_x
    # sin(2 * theta) and cos(2 * theta), where theta is the angle of the normal
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(F"{solution() = }")
"""simple docstring"""
from __future__ import annotations
def average(nums: list) -> float:
    """Return the arithmetic mean of the given list of numbers."""
    if not nums:
        raise ValueError('List is empty')
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
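# Worked examples (illustrative, not part of the original file):
#
#   >>> average([2, 4, 6])
#   4.0
#   >>> average([1, 2, 3, 4])
#   2.5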
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(F"Task {task} not supported.")

    print(F"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(F"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(F"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
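# An example invocation (illustrative; the script name and paths are placeholders):
#
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output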
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # every row and every column must be sorted in decreasing order
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break

    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
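# A worked example (illustrative, not part of the original file): in the first
# test grid every row and column is sorted in decreasing order, and 8 of its 16
# entries are negative.
#
#   >>> count_negatives_binary_search([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]])
#   8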
'''simple docstring'''
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    # check the row and the column
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # check both upper diagonals
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('Q', end=' ')
            else:
                print('.', end=' ')
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        # find the label index the MNLI head uses for "entailment"
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"""This example is {label}""" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
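# A minimal usage sketch (an assumption about the PipelineTool call convention,
# not part of the original module):
#
#   tool = TextClassificationTool()
#   tool("This is a super nice API!", labels=["positive", "negative"])
#   # -> "positive"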
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
UpperCAmelCase__ = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def lowerCAmelCase_ ( __A, __A, __A=False ) -> Tuple:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
UpperCAmelCase__ = ""
else:
UpperCAmelCase__ = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase__ = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
UpperCAmelCase__ = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase__ = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase__ = in_proj_bias[: config.hidden_size]
UpperCAmelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase__ = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase__ = in_proj_bias[-config.hidden_size :]
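# Hedged sketch (not part of the original script): timm stores the attention input
# projection as a single fused qkv matrix of shape (3 * hidden_size, hidden_size);
# the slicing above peels it into the separate query/key/value weights that the HF
# attention layers expect. The helper name below is illustrative only.
def _demo_split_qkv(in_proj_weight, hidden_size):
    query = in_proj_weight[:hidden_size, :]                  # first hidden_size rows
    key = in_proj_weight[hidden_size : hidden_size * 2, :]   # middle hidden_size rows
    value = in_proj_weight[-hidden_size:, :]                 # last hidden_size rows
    return query, key, value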
def lowerCAmelCase_ ( __A, __A, __A ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ = dct.pop(__A )
UpperCAmelCase__ = val
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase__ = Image.open(requests.get(__A, stream=__A ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( __A, __A ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ = DeiTConfig()
# all deit models have fine-tuned heads
UpperCAmelCase__ = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
UpperCAmelCase__ = 1_000
UpperCAmelCase__ = "huggingface/label-files"
UpperCAmelCase__ = "imagenet-1k-id2label.json"
UpperCAmelCase__ = json.load(open(hf_hub_download(__A, __A, repo_type="dataset" ), "r" ) )
UpperCAmelCase__ = {int(__A ): v for k, v in idalabel.items()}
UpperCAmelCase__ = idalabel
UpperCAmelCase__ = {v: k for k, v in idalabel.items()}
UpperCAmelCase__ = int(deit_name[-6:-4] )
UpperCAmelCase__ = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
UpperCAmelCase__ = 192
UpperCAmelCase__ = 768
UpperCAmelCase__ = 12
UpperCAmelCase__ = 3
elif deit_name[9:].startswith("small" ):
UpperCAmelCase__ = 384
UpperCAmelCase__ = 1_536
UpperCAmelCase__ = 12
UpperCAmelCase__ = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
UpperCAmelCase__ = 1_024
UpperCAmelCase__ = 4_096
UpperCAmelCase__ = 24
UpperCAmelCase__ = 16
# load original model from timm
UpperCAmelCase__ = timm.create_model(__A, pretrained=__A )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCAmelCase__ = timm_model.state_dict()
UpperCAmelCase__ = create_rename_keys(__A, __A )
for src, dest in rename_keys:
rename_key(__A, __A, __A )
read_in_q_k_v(__A, __A, __A )
# load HuggingFace model
UpperCAmelCase__ = DeiTForImageClassificationWithTeacher(__A ).eval()
model.load_state_dict(__A )
# Check outputs on an image, prepared by DeiTImageProcessor
    # maintain the same crop ratio w.r.t. 224-pixel images, see
    # https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    UpperCAmelCase__ = int((256 / 224) * config.image_size )
UpperCAmelCase__ = DeiTImageProcessor(size=__A, crop_size=config.image_size )
UpperCAmelCase__ = image_processor(images=prepare_img(), return_tensors="pt" )
UpperCAmelCase__ = encoding["pixel_values"]
UpperCAmelCase__ = model(__A )
UpperCAmelCase__ = timm_model(__A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__A, outputs.logits, atol=1e-3 )
Path(__A ).mkdir(exist_ok=__A )
print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__A )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCamelCase__ = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 143
| 1
|
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    # Fit an XGBoost classifier on the given feature/target arrays
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
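# Hedged sketch (not part of the original script): scoring the fitted classifier on
# the held-out split; accuracy_score is sklearn's standard helper and the argument
# names mirror the split created in main() above. Helper name is illustrative.
def _demo_accuracy(classifier, x_test, y_test) -> float:
    from sklearn.metrics import accuracy_score

    # fraction of test samples whose predicted class matches the label
    return accuracy_score(y_test, classifier.predict(x_test))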
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 104
|
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset
    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
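# Hedged sketch (not part of the original script): querying the index once it is
# built. It assumes the question has already been embedded with the matching DPR
# *question* encoder (a float32 vector); `k` and the helper name are illustrative.
def _demo_query(dataset, question_embedding, k=5):
    # get_nearest_examples returns (scores, examples) for the k closest passages
    scores, passages = dataset.get_nearest_examples("embeddings", question_embedding, k=k)
    return scores, passages["text"]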
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
| 345
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
class _A ( a_ ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = feature_size
lowercase = sampling_rate
lowercase = padding_value
lowercase = kwargs.pop("""padding_side""" , """right""" )
lowercase = kwargs.pop("""return_attention_mask""" , __lowerCAmelCase )
super().__init__(**__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , ):
"""simple docstring"""
if isinstance(__lowerCAmelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
lowercase = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
f' to this method that includes {self.model_input_names[0]}, but you provided'
f' {list(processed_features.keys() )}' )
lowercase = processed_features[self.model_input_names[0]]
lowercase = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(__lowerCAmelCase ) == 0:
if return_attention_mask:
lowercase = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
lowercase = required_input[0]
if isinstance(__lowerCAmelCase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
lowercase = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(__lowerCAmelCase ):
lowercase = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(__lowerCAmelCase ):
lowercase = """tf"""
elif is_torch_tensor(__lowerCAmelCase ):
lowercase = """pt"""
elif isinstance(__lowerCAmelCase , (int, float, list, tuple, np.ndarray) ):
lowercase = """np"""
else:
raise ValueError(
f'type of {first_element} unknown: {type(__lowerCAmelCase )}. '
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
lowercase = to_numpy(__lowerCAmelCase )
else:
lowercase = [to_numpy(__lowerCAmelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
lowercase = self._get_padding_strategies(padding=__lowerCAmelCase , max_length=__lowerCAmelCase )
lowercase = processed_features[self.model_input_names[0]]
lowercase = len(__lowerCAmelCase )
if not all(len(__lowerCAmelCase ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
lowercase = []
for i in range(__lowerCAmelCase ):
lowercase = {k: v[i] for k, v in processed_features.items()}
# truncation
lowercase = self._truncate(
__lowerCAmelCase , max_length=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , truncation=__lowerCAmelCase , )
truncated_inputs.append(__lowerCAmelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
lowercase = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
lowercase = PaddingStrategy.MAX_LENGTH
lowercase = {}
for i in range(__lowerCAmelCase ):
# padding
lowercase = self._pad(
truncated_inputs[i] , max_length=__lowerCAmelCase , padding_strategy=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
lowercase = []
                if value.dtype is np.dtype(np.float64 ):
                    lowercase = value.astype(np.float32 )
batch_outputs[key].append(__lowerCAmelCase )
return BatchFeature(__lowerCAmelCase , tensor_type=__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = PaddingStrategy.DO_NOT_PAD , __lowerCAmelCase = None , __lowerCAmelCase = None , ):
"""simple docstring"""
lowercase = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
lowercase = len(__lowerCAmelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
lowercase = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
lowercase = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__lowerCAmelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
            lowercase = np.ones(len(__lowerCAmelCase ) , dtype=np.int32 )
if needs_to_be_padded:
lowercase = max_length - len(__lowerCAmelCase )
if self.padding_side == "right":
if return_attention_mask:
lowercase = np.pad(
processed_features["""attention_mask"""] , (0, difference) )
lowercase = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
lowercase = np.pad(
__lowerCAmelCase , __lowerCAmelCase , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
lowercase = np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
lowercase = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
lowercase = np.pad(
__lowerCAmelCase , __lowerCAmelCase , """constant""" , constant_values=self.padding_value )
else:
            raise ValueError("""Invalid padding side: """ + str(self.padding_side ) )
return processed_features
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , ):
"""simple docstring"""
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
lowercase = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
lowercase = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
lowercase = len(__lowerCAmelCase ) > max_length
if needs_to_be_truncated:
lowercase = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
lowercase = processed_features["""attention_mask"""][:max_length]
return processed_features
def A__ ( self , __lowerCAmelCase=False , __lowerCAmelCase=None ):
"""simple docstring"""
if padding is not False:
if padding is True:
lowercase = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase = PaddingStrategy(__lowerCAmelCase )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase = padding
else:
lowercase = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
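# Hedged sketch (not part of the original module): the right-padding arithmetic used
# by the padding method above, shown standalone. feature_size > 1 pads only the time
# axis; the helper name and arguments are illustrative.
def _demo_right_pad(seq, max_length, padding_value=0.0, feature_size=1):
    difference = max_length - len(seq)
    spec = ((0, difference), (0, 0)) if feature_size > 1 else (0, difference)
    return np.pad(np.asarray(seq, dtype=np.float32), spec, "constant", constant_values=padding_value)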
| 371
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _A ( lowerCAmelCase ):
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def A__ ( self , __lowerCAmelCase=None ):
"""simple docstring"""
lowercase = {}
if top_k is not None:
lowercase = top_k
return {}, {}, postprocess_params
def __call__( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = load_image(__lowerCAmelCase )
lowercase = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
return model_inputs
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = self.model(**__lowerCAmelCase )
return model_outputs
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
lowercase = self.model.config.num_labels
if self.framework == "pt":
lowercase = model_outputs.logits.softmax(-1 )[0]
lowercase , lowercase = probs.topk(__lowerCAmelCase )
elif self.framework == "tf":
lowercase = stable_softmax(model_outputs.logits , axis=-1 )[0]
lowercase = tf.math.top_k(__lowerCAmelCase , k=__lowerCAmelCase )
lowercase , lowercase = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
lowercase = scores.tolist()
lowercase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__lowerCAmelCase , __lowerCAmelCase )]
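# Hedged sketch (not part of the original module): typical use through the high-level
# pipeline factory. The checkpoint name is illustrative and the call downloads weights.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
    print(classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=2))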
| 32
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=16 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=[0, 1, 2, 3] , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=[1, 3_84, 24, 24] , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->Union[str, Any]:
a_ = parent
a_ = batch_size
a_ = image_size
a_ = patch_size
a_ = num_channels
a_ = is_training
a_ = use_labels
a_ = hidden_size
a_ = num_hidden_layers
a_ = backbone_out_indices
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = num_labels
a_ = backbone_featmap_shape
a_ = scope
a_ = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
a_ = (image_size // patch_size) ** 2
a_ = num_patches + 1
def UpperCAmelCase__ ( self) ->List[str]:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
a_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self) ->int:
a_ = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 1_92, 3_84, 7_68],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCAmelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Dict:
a_ = DPTModel(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]:
a_ = self.num_labels
a_ = DPTForDepthEstimation(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase)
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int:
a_ = self.num_labels
a_ = DPTForSemanticSegmentation(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size))
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ = config_and_inputs
a_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Dict = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
a_ : List[Any] = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a_ : List[str] = False
a_ : Union[str, Any] = False
a_ : Optional[Any] = False
def UpperCAmelCase__ ( self) ->int:
a_ = DPTModelTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds")
def UpperCAmelCase__ ( self) ->Optional[Any]:
pass
def UpperCAmelCase__ ( self) ->Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(__UpperCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear))
def UpperCAmelCase__ ( self) ->Tuple:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(__UpperCAmelCase)
a_ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->int:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = True
if model_class in get_values(__UpperCAmelCase):
continue
a_ = model_class(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.train()
a_ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase)
a_ = model(**__UpperCAmelCase).loss
loss.backward()
def UpperCAmelCase__ ( self) ->str:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = False
a_ = True
if model_class in get_values(__UpperCAmelCase) or not model_class.supports_gradient_checkpointing:
continue
a_ = model_class(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.gradient_checkpointing_enable()
model.train()
a_ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase)
a_ = model(**__UpperCAmelCase).loss
loss.backward()
def UpperCAmelCase__ ( self) ->Tuple:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = _config_zero_init(__UpperCAmelCase)
for model_class in self.all_model_classes:
a_ = model_class(config=__UpperCAmelCase)
# Skip the check for the backbone
a_ = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
a_ = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def UpperCAmelCase__ ( self) ->List[Any]:
pass
@slow
def UpperCAmelCase__ ( self) ->Any:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
a_ = DPTModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = "add"
with self.assertRaises(__UpperCAmelCase):
a_ = DPTForDepthEstimation(__UpperCAmelCase)
def UpperCamelCase ( ) ->Tuple:
"""simple docstring"""
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
a_ = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(__UpperCAmelCase)
a_ = prepare_img()
a_ = image_processor(images=__UpperCAmelCase , return_tensors="pt").to(__UpperCAmelCase)
# forward pass
with torch.no_grad():
a_ = model(**__UpperCAmelCase)
a_ = outputs.predicted_depth
# verify the predicted depth
a_ = torch.Size((1, 3_84, 3_84))
self.assertEqual(predicted_depth.shape , __UpperCAmelCase)
a_ = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]]).to(__UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , __UpperCAmelCase , atol=1E-4))
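# Hedged sketch (not part of the test suite): predicted_depth is a raw float map; a
# common visualization step rescales it to 0-255 and converts it to a PIL image.
# Helper name is illustrative.
def _demo_depth_to_image(predicted_depth):
    depth = predicted_depth.squeeze().cpu().numpy()
    depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
    return Image.fromarray(depth.astype("uint8"))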
| 243
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encoder_decoder'] = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_encoder_decoder'] = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_encoder_decoder'] = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
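# Hedged sketch (illustrative, not part of this file): _LazyModule defers the heavy
# framework imports until an attribute is first accessed, conceptually a module-level
# __getattr__ (PEP 562) over _import_structure:
#
#     def __getattr__(name):
#         submodule = next(m for m, names in _import_structure.items() if name in names)
#         return getattr(importlib.import_module("." + submodule, __name__), name)
#
# so importing this package stays cheap even when torch, tf and flax are all installed.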
| 243
| 1
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0's are free cells whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    """A grid cell annotated with the g/h/f costs used by A* search."""

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    """Plain A* over the global grid."""

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    """Two A* searches advancing towards each other's frontier."""

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f'AStar execution time = {end_time:f} seconds')

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f'BidirectionalAStar execution time = {bd_end_time:f} seconds')
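# Hedged note (not part of the original script): with HEURISTIC = 0 the euclidean
# heuristic is used; for a displacement of (dx=3, dy=4) it gives 5.0, while manhattan
# gives 7. Both are admissible on this 4-connected unit-cost grid, and manhattan is
# the tighter bound since diagonal moves are not allowed.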
| 370
|
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def __a ( lowerCAmelCase_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_= [
"""decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase_ ,lowerCAmelCase_ )
def __a ( lowerCAmelCase_ : Any ) -> str:
'''simple docstring'''
UpperCAmelCase_, UpperCAmelCase_= emb.weight.shape
UpperCAmelCase_= nn.Linear(lowerCAmelCase_ ,lowerCAmelCase_ ,bias=lowerCAmelCase_ )
UpperCAmelCase_= emb.weight.data
return lin_layer
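# Hedged sketch (not part of the original script): the helper above ties the LM head
# to the token embedding by copying the (vocab_size, d_model) embedding weight into a
# bias-free Linear, so logits are simply hidden_states @ embed_tokens.weight.T.
# Helper name is illustrative.
def _demo_tied_logits(hidden_states, embed_tokens):
    # equivalent to applying the Linear built by the helper above
    return hidden_states @ embed_tokens.weight.T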
def __a ( lowerCAmelCase_ : List[str] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_= torch.load(lowerCAmelCase_ ,map_location="""cpu""" )
UpperCAmelCase_= Namespace(**checkpoint["""cfg"""]["""model"""] )
UpperCAmelCase_= checkpoint["""model"""]
remove_ignore_keys_(lowerCAmelCase_ )
UpperCAmelCase_= state_dict["""decoder.embed_tokens.weight"""].shape[0]
UpperCAmelCase_= {key.replace("""decoder""" ,"""model""" ): val for key, val in state_dict.items()}
UpperCAmelCase_= XGLMConfig(
vocab_size=lowerCAmelCase_ ,max_position_embeddings=args.max_target_positions ,num_layers=args.decoder_layers ,attention_heads=args.decoder_attention_heads ,ffn_dim=args.decoder_ffn_embed_dim ,d_model=args.decoder_embed_dim ,layerdrop=args.decoder_layerdrop ,dropout=args.dropout ,attention_dropout=args.attention_dropout ,activation_dropout=args.activation_dropout ,activation_function="""gelu""" ,scale_embedding=not args.no_scale_embedding ,tie_word_embeddings=args.share_decoder_input_output_embed ,)
UpperCAmelCase_= XGLMForCausalLM(lowerCAmelCase_ )
UpperCAmelCase_= model.load_state_dict(lowerCAmelCase_ ,strict=lowerCAmelCase_ )
print(lowerCAmelCase_ )
UpperCAmelCase_= make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
__A = parser.parse_args()
__A = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 277
| 0
|
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
_UpperCamelCase : Dict = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
_UpperCamelCase : Optional[Any] = {
'169M': 768,
'430M': 1_024,
'1B5': 2_048,
'3B': 2_560,
'7B': 4_096,
'14B': 5_120,
}
def __UpperCAmelCase ( A : List[str] ) -> List[Any]:
UpperCAmelCase_ : int = list(state_dict.keys() )
for name in state_dict_keys:
UpperCAmelCase_ : str = state_dict.pop(A )
# emb -> embedding
if name.startswith('''emb.''' ):
UpperCAmelCase_ : Any = name.replace('''emb.''' , '''embeddings.''' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('''blocks.0.ln0''' ):
UpperCAmelCase_ : str = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' )
# att -> attention
UpperCAmelCase_ : Optional[int] = re.sub(r'''blocks\.(\d+)\.att''' , r'''blocks.\1.attention''' , A )
# ffn -> feed_forward
UpperCAmelCase_ : Dict = re.sub(r'''blocks\.(\d+)\.ffn''' , r'''blocks.\1.feed_forward''' , A )
# time_mix_k -> time_mix_key and reshape
if name.endswith('''.time_mix_k''' ):
UpperCAmelCase_ : Tuple = name.replace('''.time_mix_k''' , '''.time_mix_key''' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('''.time_mix_v''' ):
UpperCAmelCase_ : Tuple = name.replace('''.time_mix_v''' , '''.time_mix_value''' )
        # time_mix_r -> time_mix_receptance and reshape
if name.endswith('''.time_mix_r''' ):
UpperCAmelCase_ : Optional[int] = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' )
if name != "head.weight":
UpperCAmelCase_ : int = '''rwkv.''' + name
UpperCAmelCase_ : List[str] = weight
return state_dict
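# Hedged sketch (not part of the original script): the renames above applied end to
# end to one illustrative RWKV-style key:
#   "blocks.3.att.time_mix_k"  ->  "rwkv.blocks.3.attention.time_mix_key"
def _demo_rename(name: str) -> str:
    name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
    name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
    if name.endswith(".time_mix_k"):
        name = name.replace(".time_mix_k", ".time_mix_key")
    return name if name == "head.weight" else "rwkv." + name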
def __UpperCAmelCase ( A : int , A : str , A : List[str] , A : int=None , A : Dict=None , A : Union[str, Any]=False , A : str=None ) -> str:
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' )
UpperCAmelCase_ : str = 5_0_2_7_7
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' )
else:
UpperCAmelCase_ : Dict = PreTrainedTokenizerFast(tokenizer_file=A )
UpperCAmelCase_ : List[Any] = len(A )
tokenizer.save_pretrained(A )
# 2. Build the config
UpperCAmelCase_ : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
UpperCAmelCase_ : Tuple = candidate
break
if size is None:
raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' )
if size not in possible_sizes:
raise ValueError(F"`size` should be one of {possible_sizes}, got {size}." )
UpperCAmelCase_ : str = RwkvConfig(
vocab_size=A , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(A )
# 3. Download model file then convert state_dict
UpperCAmelCase_ : Dict = hf_hub_download(A , A )
UpperCAmelCase_ : List[str] = torch.load(A , map_location='''cpu''' )
UpperCAmelCase_ : str = convert_state_dict(A )
# 4. Split in shards and save
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = shard_checkpoint(A )
for shard_file, shard in shards.items():
torch.save(A , os.path.join(A , A ) )
if index is not None:
UpperCAmelCase_ : Union[str, Any] = os.path.join(A , A )
# Save the index as well
with open(A , '''w''' , encoding='''utf-8''' ) as f:
UpperCAmelCase_ : Tuple = json.dumps(A , indent=2 , sort_keys=A ) + '''\n'''
f.write(A )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        '''Cleaning up shards. This may error with an OOM error, if this is the case don\'t worry you still have converted the model.''' )
UpperCAmelCase_ : Optional[int] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
UpperCAmelCase_ : Tuple = torch.load(os.path.join(A , A ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(A , A ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' )
UpperCAmelCase_ : Optional[int] = AutoModelForCausalLM.from_pretrained(A )
model.push_to_hub(A , max_shard_size='''2GB''' )
tokenizer.push_to_hub(A )
if __name__ == "__main__":
_UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
_UpperCamelCase : str = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 304
|
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class snake_case__ ( unittest.TestCase):
def __init__( self : int , _A : List[str] , _A : Dict=7 , _A : List[str]=3 , _A : List[str]=18 , _A : Dict=30 , _A : Union[str, Any]=4_00 , _A : List[str]=True , _A : List[str]=None , _A : int=True , _A : Tuple=None , _A : Union[str, Any]=True , _A : Tuple=[0.5, 0.5, 0.5] , _A : Union[str, Any]=[0.5, 0.5, 0.5] , _A : Tuple=False , ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = size if size is not None else {'''height''': 20, '''width''': 20}
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : Optional[int] = batch_size
UpperCAmelCase_ : Any = num_channels
UpperCAmelCase_ : Optional[Any] = image_size
UpperCAmelCase_ : Tuple = min_resolution
UpperCAmelCase_ : Tuple = max_resolution
UpperCAmelCase_ : Optional[int] = do_resize
UpperCAmelCase_ : Tuple = size
UpperCAmelCase_ : Optional[Any] = do_center_crop
UpperCAmelCase_ : Optional[int] = crop_size
UpperCAmelCase_ : Tuple = do_normalize
UpperCAmelCase_ : Optional[Any] = image_mean
UpperCAmelCase_ : int = image_std
UpperCAmelCase_ : List[Any] = do_reduce_labels
def A ( self : Union[str, Any] ) -> str:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
UpperCAmelCase_ : Optional[Any] = Image.open(dataset[0]['''file'''] )
UpperCAmelCase_ : str = Image.open(dataset[1]['''file'''] )
return image, map
def __UpperCAmelCase ( ) -> Any:
UpperCAmelCase_ : int = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
UpperCAmelCase_ : int = Image.open(ds[0]['''file'''] )
UpperCAmelCase_ : Optional[Any] = Image.open(ds[1]['''file'''] )
UpperCAmelCase_ : Dict = Image.open(ds[2]['''file'''] )
UpperCAmelCase_ : List[str] = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class snake_case__ ( UpperCamelCase , unittest.TestCase):
a_ = BeitImageProcessor if is_vision_available() else None
def A ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = BeitImageProcessingTester(self )
@property
def A ( self : List[Any] ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : List[Any] ) -> Optional[Any]:
UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''do_center_crop''' ) )
self.assertTrue(hasattr(_A , '''center_crop''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
def A ( self : List[str] ) -> Optional[int]:
UpperCAmelCase_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , _A )
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_A )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , _A )
def A ( self : Optional[Any] ) -> Any:
pass
def A ( self : List[str] ) -> Optional[int]:
# Initialize image_processing
UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Union[str, Any] ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : int = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Optional[int] ) -> str:
# Initialize image_processing
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : int = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Any ) -> Optional[Any]:
# Initialize image_processing
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
UpperCAmelCase_ : Union[str, Any] = []
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
UpperCAmelCase_ : str = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_A , _A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test not batched input (PIL images)
UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs()
UpperCAmelCase_ : List[str] = image_processing(_A , _A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched input (PIL images)
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = prepare_semantic_batch_inputs()
UpperCAmelCase_ : int = image_processing(_A , _A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
    def test_reduce_labels(self) -> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
| 304
| 1
|
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    """Pairwise cosine similarity between two batches of row embeddings."""
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 310
|
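The safety-checker module above flags an image by comparing its projected CLIP embedding against fixed concept embeddings with row-normalised cosine similarity. A minimal NumPy sketch of that similarity computation follows; the array shapes and names are illustrative only, not taken from the module.

import numpy as np

def cosine_similarity_matrix(a, b, eps=1e-12):
    # Normalise each row to unit length, guarding against zero-norm rows.
    a_norm = a / np.clip(np.linalg.norm(a, axis=1, keepdims=True), eps, None)
    b_norm = b / np.clip(np.linalg.norm(b, axis=1, keepdims=True), eps, None)
    # (n, d) @ (d, m) -> (n, m) pairwise cosine similarities.
    return a_norm @ b_norm.T

image_embeds = np.random.randn(2, 8)     # 2 images, 8-dim projections
concept_embeds = np.random.randn(17, 8)  # 17 flagged concepts
assert cosine_similarity_matrix(image_embeds, concept_embeds).shape == (2, 17)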
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the filesystem protocol prefix (e.g. "s3://") from a dataset path, if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True if `fs` is anything other than the local filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    """Move `src` to `dst`, using shutil.move for local paths and fs.mv otherwise."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear the reference to the fsspec event loop and thread (needed after a fork)."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 310
| 1
|
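As a quick usage sketch of the helpers above (this assumes an fsspec version where LocalFileSystem.protocol is the plain string "file", which is what the module's remote check relies on):

import fsspec

# Protocol-prefixed paths reduce to their bare path component.
assert extract_path_from_uri("s3://my-bucket/data/train") == "my-bucket/data/train"
assert extract_path_from_uri("relative/path") == "relative/path"

# Only the local filesystem is treated as non-remote.
local_fs = fsspec.filesystem("file")
assert not is_remote_filesystem(local_fs)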
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel
    # the kernel size have to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 201
|
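A small sanity check for the kernel generator above, using the same arbitrary parameters as the demo: the even ksize is bumped to the next odd value and, for theta = 0 and psi = 0, the response peaks at the kernel centre.

import numpy as np

kernel = gabor_filter_kernel(ksize=10, sigma=8, theta=0, lambd=10, gamma=0, psi=0)
assert kernel.shape == (11, 11)          # 10 is even, so it becomes 11
center = kernel[kernel.shape[0] // 2, kernel.shape[1] // 2]
assert np.isclose(center, kernel.max())  # strongest response at the centre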
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256


class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: T5FilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to network outputs range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs to features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. "
                "Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. "
                "Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
| 201
| 1
|
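The scale_features/scale_to_features pair above is a plain affine map between the mel-feature range [log 1e-5, 4.0] and the network range. A standalone sketch of the round trip, with module-level constants standing in for the pipeline attributes:

import math
import torch

MIN_VALUE, MAX_VALUE = math.log(1e-5), 4.0  # the pipeline's MelGAN bounds

def scale_features(features, min_out=-1.0, max_out=1.0):
    zero_one = (features - MIN_VALUE) / (MAX_VALUE - MIN_VALUE)
    return zero_one * (max_out - min_out) + min_out

def scale_to_features(outputs, min_out=-1.0, max_out=1.0):
    zero_one = (outputs - min_out) / (max_out - min_out)
    return zero_one * (MAX_VALUE - MIN_VALUE) + MIN_VALUE

x = torch.tensor([-5.0, 0.0, 4.0])
assert torch.allclose(scale_to_features(scale_features(x)), x)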
"""simple docstring"""
import os


def solution() -> int:
    """Return the total of all name scores in p022_names.txt (Project Euler 22)."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score


if __name__ == "__main__":
    print(solution())
| 364
|
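For a concrete check of the scoring rule above, the worked example from the Project Euler problem statement: "COLIN" is worth 3 + 15 + 12 + 9 + 14 = 53, and as the 938th name in the sorted list it contributes 938 x 53 = 49714.

def name_score(name: str) -> int:
    # A = 1, B = 2, ..., Z = 26
    return sum(ord(letter) - 64 for letter in name)

assert name_score("COLIN") == 53
assert 938 * name_score("COLIN") == 49714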
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class BlipImageProcessor(BaseImageProcessor):
    r"""
    Constructs a BLIP image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
| 127
| 0
|
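The preprocess flow above is convert-to-RGB -> resize -> rescale -> normalize -> channels-first, all on NumPy arrays. A minimal standalone sketch of the rescale/normalize/transpose steps with the OpenAI CLIP statistics used as defaults (the resize step is omitted here for brevity):

import numpy as np

OPENAI_CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
OPENAI_CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])

image = np.random.randint(0, 256, (384, 384, 3)).astype(np.float32)
image = image * (1 / 255)                             # rescale to [0, 1]
image = (image - OPENAI_CLIP_MEAN) / OPENAI_CLIP_STD  # per-channel normalize
image = image.transpose(2, 0, 1)                      # HWC -> CHW (channels first)
assert image.shape == (3, 384, 384)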
def fibonacci(n: int) -> int:
    """Compute the nth Fibonacci number iteratively (F(0) = 0, F(1) = 1)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """Project Euler 25: index of the first Fibonacci term with n digits."""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 29
|
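Note that the driver above recomputes the whole sequence from scratch at each step; a sketch that keeps only the last two terms finds the same index with O(n) additions (the function name here is my own):

def fibonacci_digits_index_fast(n: int) -> int:
    a, b = 0, 1  # b tracks F(index)
    index = 1
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index

assert fibonacci_digits_index_fast(3) == 12  # F(12) = 144 is the first 3-digit term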
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 306
| 0
|
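The preprocess generator above slices the prompt grid into chunks of points_per_batch so the mask decoder never sees more point prompts than fit in memory; the rest of the model inputs are repeated per chunk. A toy sketch of the same chunking (array shapes are illustrative):

import numpy as np

grid_points = np.zeros((1, 1024, 1, 2))  # (batch, n_points, points_per_mask, xy)
points_per_batch = 64
n_points = grid_points.shape[1]

for i in range(0, n_points, points_per_batch):
    batched_points = grid_points[:, i : i + points_per_batch, :, :]
    is_last = i == n_points - points_per_batch
    # each chunk becomes one forward pass of the mask decoder
    assert batched_points.shape[1] <= points_per_batch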
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = {"""input_ids""": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
UpperCAmelCase__ = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=_UpperCAmelCase , )
| 61
|
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
UpperCAmelCase_ = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    r"""
    Construct a CANINE tokenizer (i.e. a character splitter).
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    # CanineTokenizer has no vocab file
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        return ()
| 61
| 1
|
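Because CANINE tokenizes at the raw Unicode codepoint level, encoding needs nothing beyond ord/chr plus the private-use special codepoints. A self-contained sketch of the round trip the tokenizer methods above implement (helper names here are my own):

CLS, SEP = 0xE000, 0xE001  # private-use codepoints, as defined above

def to_ids(text):
    return [CLS] + [ord(char) for char in text] + [SEP]

def to_text(ids):
    return "".join(chr(i) for i in ids if i not in (CLS, SEP))

assert to_text(to_ids("héllo")) == "héllo"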
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor):
    r"""
    Constructs a DPT image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")

        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert model outputs into semantic segmentation maps, optionally resized to `target_sizes`."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 61
|
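The resize helper above snaps each target side to a multiple of ensure_multiple_of while changing the aspect ratio as little as possible. A standalone sketch of the snapping rule it applies (values chosen for illustration):

import math

def constraint_to_multiple_of(val, multiple, min_val=0):
    x = round(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x

# e.g. a 500-pixel side with ensure_multiple_of=32 snaps to 512
assert constraint_to_multiple_of(500, 32) == 512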
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
@slow
def snake_case__ ( self):
# fmt: off
UpperCAmelCase__ : Union[str, Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase__, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e")
| 163
| 0
|
"""Dutch national flag sort: order a sequence of the values 0, 1 and 2 in a single pass."""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort the sequence in place so that all 0s come first, then all 1s, then all 2s."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
| 369
|
"""Recursive factorial, memoized with functools.lru_cache."""
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Return the factorial of a non-negative integer."""
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
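    # Demonstration of the memoization (sketch): after factorial(10), computing
    # factorial(12) only evaluates 11! and 12!; lru_cache exposes the statistics.
    print(factorial(10))  # 3628800
    print(factorial(12))  # 479001600
    print(factorial.cache_info())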
| 83
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    """Configuration class for the Time Series Transformer model."""

    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        **kwargs,
    ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
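

# Minimal usage sketch (values are illustrative): derived attributes such as
# `context_length` and `feature_size` fall out of the constructor arguments.
if __name__ == "__main__":
    cfg = TimeSeriesTransformerConfig(prediction_length=24)
    print(cfg.context_length)  # defaults to prediction_length -> 24
    print(cfg.feature_size)  # input_size * len(lags_sequence) + _number_of_features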
| 54
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache
        super().__init__(
            separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs)
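

# Usage sketch for the config above: `num_layers` is derived from the switch
# and extra layer counts rather than passed directly.
if __name__ == "__main__":
    config = GPTSanJapaneseConfig(num_switch_layers=10, num_ext_layers=2)
    print(config.num_layers)  # 12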
| 307
| 0
|
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 365
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1_408,
        intermediate_size=6_144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00_001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1_408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
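

# Composition sketch: a Blip2Config can be assembled from the three sub-configs
# via the classmethod above; the default text backbone is OPT via CONFIG_MAPPING.
if __name__ == "__main__":
    vision = Blip2VisionConfig()
    qformer = Blip2QFormerConfig()
    text = CONFIG_MAPPING["opt"]()
    blip2 = Blip2Config.from_vision_qformer_text_configs(vision, qformer, text)
    print(blip2.qformer_config.encoder_hidden_size)  # tied to the vision hidden size (1408)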
| 168
| 0
|
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Instantiate a model with random weights from a pretrained config and save it with its tokenizer."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
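# Example invocation (sketch; the model name and output directory are illustrative):
#   python save_randomly_initialized_model.py t5-small /tmp/t5-small-random
# which is equivalent to calling:
#   save_randomly_initialized_version("t5-small", "/tmp/t5-small-random")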
| 13
|
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride)
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)
        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)
        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
| 13
| 1
|
deps = {
"""Pillow""": """Pillow<10.0.0""",
"""accelerate""": """accelerate>=0.20.3""",
"""av""": """av==9.2.0""",
"""beautifulsoup4""": """beautifulsoup4""",
"""black""": """black~=23.1""",
"""codecarbon""": """codecarbon==1.2.0""",
"""cookiecutter""": """cookiecutter==1.7.3""",
"""dataclasses""": """dataclasses""",
"""datasets""": """datasets!=2.5.0""",
"""decord""": """decord==0.6.0""",
"""deepspeed""": """deepspeed>=0.9.3""",
"""diffusers""": """diffusers""",
"""dill""": """dill<0.3.5""",
"""evaluate""": """evaluate>=0.2.0""",
"""fairscale""": """fairscale>0.3""",
"""faiss-cpu""": """faiss-cpu""",
"""fastapi""": """fastapi""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1,<=0.7.0""",
"""ftfy""": """ftfy""",
"""fugashi""": """fugashi>=1.0""",
"""GitPython""": """GitPython<3.1.19""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
"""importlib_metadata""": """importlib_metadata""",
"""ipadic""": """ipadic>=1.0.0,<2.0""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
"""jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
"""jieba""": """jieba""",
"""kenlm""": """kenlm""",
"""keras-nlp""": """keras-nlp>=0.3.1""",
"""librosa""": """librosa""",
"""nltk""": """nltk""",
"""natten""": """natten>=0.14.6""",
"""numpy""": """numpy>=1.17""",
"""onnxconverter-common""": """onnxconverter-common""",
"""onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
"""onnxruntime""": """onnxruntime>=1.4.0""",
"""opencv-python""": """opencv-python""",
"""optuna""": """optuna""",
"""optax""": """optax>=0.0.8,<=0.1.4""",
"""packaging""": """packaging>=20.0""",
"""parameterized""": """parameterized""",
"""phonemizer""": """phonemizer""",
"""protobuf""": """protobuf""",
"""psutil""": """psutil""",
"""pyyaml""": """pyyaml>=5.1""",
"""pydantic""": """pydantic<2""",
"""pytest""": """pytest>=7.2.0""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""python""": """python>=3.8.0""",
"""ray[tune]""": """ray[tune]""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
"""rjieba""": """rjieba""",
"""rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
"""ruff""": """ruff>=0.0.241,<=0.0.259""",
"""sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
"""sacremoses""": """sacremoses""",
"""safetensors""": """safetensors>=0.3.1""",
"""sagemaker""": """sagemaker>=2.31.0""",
"""scikit-learn""": """scikit-learn""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""sigopt""": """sigopt""",
"""starlette""": """starlette""",
"""sudachipy""": """sudachipy>=0.6.6""",
"""sudachidict_core""": """sudachidict_core>=20220729""",
"""tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
"""tensorflow""": """tensorflow>=2.6,<2.14""",
"""tensorflow-text""": """tensorflow-text<2.14""",
"""tf2onnx""": """tf2onnx""",
"""timeout-decorator""": """timeout-decorator""",
"""timm""": """timm""",
"""tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
"""torch""": """torch>=1.9,!=1.12.0""",
"""torchaudio""": """torchaudio""",
"""torchvision""": """torchvision""",
"""pyctcdecode""": """pyctcdecode>=0.4.0""",
"""tqdm""": """tqdm>=4.27""",
"""unidic""": """unidic>=1.0.2""",
"""unidic_lite""": """unidic_lite>=1.0.7""",
"""urllib3""": """urllib3<2.0.0""",
"""uvicorn""": """uvicorn""",
}
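

# Sketch of how a table like this is typically consumed: every value is already
# a pip requirement string, so emitting a constraints file is a one-liner.
if __name__ == "__main__":
    with open("constraints.txt", "w") as f:
        f.write("\n".join(deps.values()))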
| 369
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, hidden_sizes=self.hidden_sizes, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices)
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])
        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)
        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim])
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim])

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized")
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)


@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 297
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
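# Lazy-loading note (illustrative): with the _LazyModule indirection above,
# torch-dependent symbols are only imported on first attribute access, e.g.
#   from transformers.models.timesformer import TimesformerConfig  # cheap
#   from transformers.models.timesformer import TimesformerModel   # pulls in torch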
| 99
|
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
lowercase : List[str] = """src/transformers"""
lowercase : Optional[int] = """docs/source/en/tasks"""
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text between `start_prompt` and `end_prompt`, along with its location and the file lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
lowercase : Tuple = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def get_model_list_for_task(task_guide):
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide), start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->", end_prompt="<!--End of the generated tip-->")
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 99
| 1
|
"""Convert original WavLM checkpoints into the transformers format."""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group")
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
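
# The "*" substitution in recursively_load_weights, shown in isolation: the
# text before the matched fairseq key ends with ".<layer index>.", so splitting
# on "." and taking the second-to-last piece recovers the index. The key below
# is hypothetical; the mapping entry is taken from MAPPING above.
name = "encoder.layers.3.fc1.weight"
key = "fc1"
mapped_key = "encoder.layers.*.feed_forward.intermediate_dense"
layer_index = name.split(key)[0].split(".")[-2]  # -> "3"
print(mapped_key.replace("*", layer_index))
# encoder.layers.3.feed_forward.intermediate_dense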
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Same tests as above, but exercising the spaCy/ftfy BPE pre-tokenization path."""

    pass
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)
    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py", f"{model_dir}/__init__.py", )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py", f"{model_dir}/configuration_{lowercase_model_name}.py", )
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_{lowercase_model_name}.py" , f"{model_dir}/modeling_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_tf_{lowercase_model_name}.py" , f"{model_dir}/modeling_tf_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_tf_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py" )
if output_flax:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_flax_{lowercase_model_name}.py" , f"{model_dir}/modeling_flax_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_flax_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/{lowercase_model_name}.md" , f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md" , )
shutil.move(
f"{directory}/tokenization_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/tokenization_fast_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}_fast.py" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)
        def skip_units(line):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
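
# replace_in_files consumes a generated "to_replace_<model>.py" file whose
# comment markers drive the edits. A hypothetical fragment, for illustration
# only (the path and anchor line below are made up):
#
#   # To replace in: "src/transformers/__init__.py"
#   # Below: "from .models import ("
#       brand_new_model,
#   # End.
#
# Every non-marker line between "# Below:" and "# End." is copied verbatim
# under the anchor line in the target file; "# Replace with" resets the buffer.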
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # a node already settled by the opposite search yields a candidate path
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """
    Bi-directional Dijkstra: run one search from the source and one from the
    destination, and stop once the two frontiers meet.

    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()

    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance, )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance, )

        # once the frontiers have met, no shorter path can exist
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
graph_bwd = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
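
# The first step of convert_t5x_to_pytorch -- flattening the nested Flax
# parameter tree into slash-joined keys -- shown on a toy input (the parameter
# names below are made up):
#
#   from flax import traverse_util
#
#   variables = {"target": {"encoder": {"layers_0": {"mlp": {"wi": {"kernel": 1}}}}}}
#   flat = traverse_util.flatten_dict(variables["target"])
#   # flatten_dict returns tuple keys: {('encoder', 'layers_0', 'mlp', 'wi', 'kernel'): 1}
#   old = {"/".join(k): v for k, v in flat.items()}
#   print(old)  # {'encoder/layers_0/mlp/wi/kernel': 1}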
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()
    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }
    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }
    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)
    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)
    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]
    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention_forwardGenerator_pass = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1, )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2", )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np", )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
from ..utils import DummyObject, requires_backends
# NOTE: the obfuscation in this snippet lost the concrete class names; the
# names below are a best-effort reconstruction following diffusers'
# dummy_flax_objects.py (13 dummy classes raising on missing Flax backends).
class FlaxControlNetModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxModelMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxUNet2DConditionModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxAutoencoderKL(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDIMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDPMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDPMSolverMultistepScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxKarrasVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxLMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPNDMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxSchedulerMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxScoreSdeVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
TGT = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def test_disaggregated_scores_are_determinate():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    pred = [
        '''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
    ]
    tgt = [
        ''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
    ]

    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_no_aggregation = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_no_aggregation, defaultdict)
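
# The newline_sep behaviour tested above comes from Google's rouge_score
# package: "rougeLsum" splits the text on "\n" and scores sentence-by-sentence
# with a union-LCS. A minimal sketch using rouge_score directly (the sentences
# below are made up):
#
#   from rouge_score import rouge_scorer
#
#   scorer = rouge_scorer.RougeScorer(["rougeLsum"])
#   target = "the cat sat .\nthe dog ran ."
#   with_newline = scorer.score(target, "the cat sat .\nthe dog ran .")
#   without_newline = scorer.score(target, "the cat sat . the dog ran .")
#   # The split version is scored per sentence, so the results can differ
#   # even though the tokens are identical.
#   print(with_newline["rougeLsum"].fmeasure, without_newline["rougeLsum"].fmeasure)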
"""simple docstring"""
def euclidean_distance_sqr(point1, point2) -> float:
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0) -> list:
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")) -> float:
    # brute force: check every pair, O(n^2); used for small base cases
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")) -> float:
    # within the strip, only a constant number of neighbours can be closer
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts) -> float:
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # points within closest_pair_dis of the dividing vertical line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts) -> float:
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("""Distance:""", closest_pair_of_points(points, len(points)))
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
# NOTE: the obfuscated source lost the original class name; the name below is
# an assumption, matching transformers' VideoMAE-style video image processor.
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        # Every prediction must come with the same number of references.
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # sacrebleu expects one list per reference position, so transpose the references.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
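# A minimal usage sketch (illustrative inputs; assumes this metric is loaded through
# `datasets.load_metric`, as in the docstring examples above):
if __name__ == "__main__":
    import datasets

    chrf = datasets.load_metric("chrf")
    prediction = ["The cat is on the mat."]
    reference = [["The cat is on the mat."]]
    # word_order=2 turns chrF into chrF++.
    print(chrf.compute(predictions=prediction, references=reference, word_order=2))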
| 341
| 1
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
_DESCRIPTION = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
_KWARGS_DESCRIPTION = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
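# A minimal usage sketch (illustrative tokenized inputs, mirroring the docstring examples):
if __name__ == "__main__":
    import datasets

    google_bleu = datasets.load_metric("google_bleu")
    hypotheses = [["the", "cat", "sat", "on", "the", "mat"]]
    list_of_references = [[["the", "cat", "sat", "on", "the", "mat"]]]
    print(google_bleu.compute(predictions=hypotheses, references=list_of_references))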
| 292
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # sequence length times mean per-token loss, compared against a recorded reference score
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 292
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
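# Usage sketch (run outside this module; the second import path is an assumption based
# on where this file lives inside the `transformers` package):
#
#     from transformers import MobileNetV2Config
#     from transformers.models.mobilenet_v2.configuration_mobilenet_v2 import MobileNetV2OnnxConfig
#
#     config = MobileNetV2Config(depth_multiplier=1.0, image_size=224)
#     onnx_config = MobileNetV2OnnxConfig(config)
#     print(onnx_config.inputs)  # OrderedDict([('pixel_values', {0: 'batch'})])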
| 27
|
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
lowerCAmelCase : Any = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
lowerCAmelCase : Any = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`'warn'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
lowerCAmelCase : Any = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 291
| 0
|
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    """Build and import the MultiScaleDeformableAttention CUDA extension on first use."""
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
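# Usage sketch: the extension is compiled lazily on the first call (a CUDA toolchain and
# the bundled `kernels/deformable_detr` sources are assumed to be present):
#
#     MSDA = load_cuda_kernels()
#     # the compiled module then exposes the multi-scale deformable attention ops
#     # used by the DeformableDetr attention layers.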
| 77
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    # reuse the txt2img components to assemble the image-variation pipeline
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
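# Example invocation (hypothetical script name and output path):
#
#     python convert_unclip_txt2img_to_image_variation.py \
#         --dump_path ./karlo-image-variations \
#         --txt2img_unclip kakaobrain/karlo-v1-alpha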
| 77
| 1
|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 4
|
"""simple docstring"""
import math
class SelfOrganizingMap:
    """A two-cluster Kohonen self-organizing map (winner-take-all update)."""

    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Return the index of the weight vector closest to the sample (squared Euclidean distance)."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        # the closer (smaller-distance) vector wins
        return 0 if d0 < d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Move the winning weight vector j a step of size alpha toward the sample."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # training samples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
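# Worked example of a single update step (illustrative numbers): with alpha = 0.5,
# sample = [1, 1, 0, 0] and winning row [0.2, 0.6, 0.5, 0.9], each component moves
# halfway toward the sample: 0.2 + 0.5 * (1 - 0.2) = 0.6 and 0.6 + 0.5 * (1 - 0.6) = 0.8.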
| 66
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 356
|
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        """Decode the char, BPE and word-piece heads and keep, per sample, the string with
        the highest confidence."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f'''Format {format} is not supported.''')

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            # truncate at the end-of-sequence marker and accumulate a confidence score
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
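# Usage sketch (checkpoint name as in the MGP-STR model card; `model` is assumed to be
# an `MgpstrForSceneTextRecognition` instance and `image` a PIL image):
#
#     processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#     pixel_values = processor(images=image, return_tensors="pt").pixel_values
#     outputs = model(pixel_values)
#     text = processor.batch_decode(outputs.logits)["generated_text"]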
| 226
| 0
|
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
main()
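# Key relationship (sketch): for prime p, generator e_1 and secret exponent d, the
# public component e_2 is the modular inverse of pow(e_1, d, p), so
#     pow(e_1, d, p) * e_2 % p == 1
# holds for every key pair produced by generate_key().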
| 167
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 316
| 0
|
'''simple docstring'''
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Return the shortest path between `start` and `goal` using breadth-first search."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the shortest-path distance (number of edges) between `start` and `target`."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
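# Both helpers are plain breadth-first search on the adjacency-list graph above:
# O(V + E) time and O(V) extra space (see the example calls in the __main__ block below).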
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 52
|
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        # a negative max_weight must raise ValueError
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        # a negative weight value must raise ValueError
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        # a negative profit value must raise ValueError
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        # a zero max_weight must raise ValueError
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        # unequal profit/weight list lengths must raise IndexError
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same."
        )
if __name__ == "__main__":
unittest.main()
| 52
| 1
|
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Return the shortest path between `start` and `goal` using breadth-first search."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the shortest-path distance (number of edges) between `start` and `target`."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 43
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
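# Usage sketch (hub download assumed):
#
#     tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#     tok("Hello world")["input_ids"]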
| 43
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """
    Pipeline for image-to-image generation using Kandinsky 2.2 (unet + scheduler + movq decoder).
    """

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents: noise the clean latents up to the starting timestep (img2img strength)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
@replace_example_docstring(SCREAMING_SNAKE_CASE_)
def __call__(
    self,
    image_embeds,
    image,
    negative_image_embeds,
    height: int = 512,
    width: int = 512,
    num_inference_steps: int = 100,
    guidance_scale: float = 4.0,
    strength: float = 0.3,
    num_images_per_prompt: int = 1,
    generator=None,
    output_type: str = "pil",
    return_dict: bool = True,
):
    device = self._execution_device
    do_classifier_free_guidance = guidance_scale > 1.0
    if isinstance(image_embeds, list):
        image_embeds = torch.cat(image_embeds, dim=0)
    batch_size = image_embeds.shape[0]
    if isinstance(negative_image_embeds, list):
        negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
    if do_classifier_free_guidance:
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
    if not isinstance(image, list):
        image = [image]
    if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
        raise ValueError(
            f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor")
    image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
    image = image.to(dtype=image_embeds.dtype, device=device)
    latents = self.movq.encode(image)["latents"]
    latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
    self.scheduler.set_timesteps(num_inference_steps, device=device)
    timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
    latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
    height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
    latents = self.prepare_latents(
        latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator)
    for i, t in enumerate(self.progress_bar(timesteps)):
        # expand the latents if we are doing classifier free guidance
        latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
        added_cond_kwargs = {"image_embeds": image_embeds}
        noise_pred = self.unet(
            sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
        if do_classifier_free_guidance:
            noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
            _, variance_pred_text = variance_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
        if not (
            hasattr(self.scheduler.config, "variance_type")
            and self.scheduler.config.variance_type in ["learned", "learned_range"]
        ):
            noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
        # compute the previous noisy sample x_t -> x_t-1
        latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]
    # post-processing
    image = self.movq.decode(latents, force_not_quantize=True)["sample"]
    if output_type not in ["pt", "np", "pil"]:
        raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
    if output_type in ["np", "pil"]:
        image = image * 0.5 + 0.5
        image = image.clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
    if output_type == "pil":
        image = self.numpy_to_pil(image)
    if not return_dict:
        return (image,)
    return ImagePipelineOutput(images=image)
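# A minimal usage sketch (not part of the original file): assuming this method belongs to a
# Kandinsky-style img2img pipeline whose prior produces `image_embeds`/`negative_image_embeds`,
# a call would look roughly like the following. The `pipeline` variable and file name are
# illustrative assumptions, not taken from this file.
#
#   pipeline = pipeline.to("cuda")
#   out = pipeline(
#       image_embeds=image_embeds,
#       image=init_image,                      # PIL.Image or torch.Tensor
#       negative_image_embeds=negative_image_embeds,
#       height=512, width=512,
#       strength=0.3,                          # how much of the init image to repaint
#       num_inference_steps=100,
#       output_type="pil",
#   )
#   out.images[0].save("img2img_result.png")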
| 366
|
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png").resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png").resize((512, 512))

        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6, )
        image = output.images[0]
        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy")
        assert np.abs(expected_image - image).max() < 9e-2
| 227
| 0
|
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """
    The output of [`PriorTransformer`]: the predicted CLIP image embedding conditioned on the text embedding input.
    """

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.")

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, activation_fn="gelu", attention_bias=True, )
                for d in range(num_layers)
            ])

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0)
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes.")

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(additional_embeds, dim=1)

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings, (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ), value=0.0, )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
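# A minimal smoke-test sketch (not from the original file): instantiating a tiny PriorTransformer
# and running one forward pass. All sizes below are illustrative assumptions chosen to be small.
#
#   prior = PriorTransformer(num_attention_heads=2, attention_head_dim=4, num_layers=2,
#                            embedding_dim=8, num_embeddings=7, additional_embeddings=4)
#   hidden_states = torch.randn(1, 8)              # noisy image embedding
#   proj_embedding = torch.randn(1, 8)             # pooled text embedding
#   encoder_hidden_states = torch.randn(1, 7, 8)   # per-token text states
#   out = prior(hidden_states, timestep=1, proj_embedding=proj_embedding,
#               encoder_hidden_states=encoder_hidden_states)
#   assert out.predicted_image_embedding.shape == (1, 8)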
| 146
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
    "--preprocessing_num_workers", type=int, default=4, help="The number of processes to use for preprocessing."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, "rb") as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle)
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the "
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove it.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]
    return tokenized_examples
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)


metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()

    # Evaluation
    logger.info("***** Running Evaluation *****")
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}")
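# Example invocation of this script (illustrative; the script file name and the
# model/dataset names below are assumptions, not taken from this file):
#
#   python evaluate_qa_trt.py \
#       --onnx_model_path ./model.onnx \
#       --output_dir ./predictions \
#       --tokenizer_name bert-base-uncased \
#       --dataset_name squad \
#       --fp16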
| 146
| 1
|
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
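# Example launch (illustrative; the file name and process count are assumptions):
#
#   torchrun --nproc_per_node=2 distributed_dataset_test.py --streaming True
#
# Each process reads RANK/WORLD_SIZE from the environment, keeps only its share of the
# dataset via `split_dataset_by_node`, and fails loudly if the local item count is wrong.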
| 174
|
import jax.numpy as jnp
from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


lowerCamelCase_ = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """
    Shift input ids one token to the right.
    """
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
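# A quick illustration (not from the original file) of what `shift_tokens_right` does when
# building decoder inputs, assuming pad_token_id=0 and decoder_start_token_id=0:
#
#   input_ids           = [[5, 6, 7, 8]]
#   shift_tokens_right --> [[0, 5, 6, 7]]   # start token prepended, sequence shifted right
#
# Label positions set to -100 (the ignore index) are replaced by the pad token in the shifted ids.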
| 174
| 1
|
"""simple docstring"""
UpperCAmelCase : Optional[Any] = tuple[float, float, float]
UpperCAmelCase : Optional[Any] = tuple[float, float, float]
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Vectorad:
'''simple docstring'''
lowercase_ = end_pointa[0] - end_pointa[0]
lowercase_ = end_pointa[1] - end_pointa[1]
lowercase_ = end_pointa[2] - end_pointa[2]
return (x, y, z)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Vectorad:
'''simple docstring'''
lowercase_ = ab[1] * ac[2] - ab[2] * ac[1] # *i
lowercase_ = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
lowercase_ = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> bool:
'''simple docstring'''
return tuple(round(__lowerCAmelCase , __lowerCAmelCase ) for x in vector ) == (0, 0, 0)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 10 ) -> bool:
'''simple docstring'''
lowercase_ = create_vector(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = create_vector(__lowerCAmelCase , __lowerCAmelCase )
return is_zero_vector(get_ad_vectors_cross(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase )
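# A short usage sketch (not part of the original file): three points on the line y = 2x
# are collinear, while perturbing one of them breaks collinearity.
if __name__ == "__main__":
    assert are_collinear((0, 0, 0), (1, 2, 0), (2, 4, 0))
    assert not are_collinear((0, 0, 0), (1, 2, 0), (2, 5, 1))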
| 136
|
"""simple docstring"""
from math import pi, sqrt, tan
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError("""surface_area_cube() only accepts non-negative values""" )
return 6 * side_length**2
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if length < 0 or breadth < 0 or height < 0:
raise ValueError("""surface_area_cuboid() only accepts non-negative values""" )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError("""surface_area_sphere() only accepts non-negative values""" )
return 4 * pi * radius**2
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError("""surface_area_hemisphere() only accepts non-negative values""" )
return 3 * pi * radius**2
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError("""surface_area_cone() only accepts non-negative values""" )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
"""surface_area_conical_frustum() only accepts non-negative values""" )
lowercase_ = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError("""surface_area_cylinder() only accepts non-negative values""" )
return 2 * pi * radius * (height + radius)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if torus_radius < 0 or tube_radius < 0:
raise ValueError("""surface_area_torus() only accepts non-negative values""" )
if torus_radius < tube_radius:
raise ValueError(
"""surface_area_torus() does not support spindle or self intersecting tori""" )
return 4 * pow(__lowerCAmelCase , 2 ) * torus_radius * tube_radius
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if length < 0 or width < 0:
raise ValueError("""area_rectangle() only accepts non-negative values""" )
return length * width
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError("""area_square() only accepts non-negative values""" )
return side_length**2
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError("""area_triangle() only accepts non-negative values""" )
return (base * height) / 2
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError("""area_triangle_three_sides() only accepts non-negative values""" )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError("""Given three sides do not form a triangle""" )
lowercase_ = (sidea + sidea + sidea) / 2
lowercase_ = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError("""area_parallelogram() only accepts non-negative values""" )
return base * height
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if basea < 0 or basea < 0 or height < 0:
raise ValueError("""area_trapezium() only accepts non-negative values""" )
return 1 / 2 * (basea + basea) * height
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError("""area_circle() only accepts non-negative values""" )
return pi * radius**2
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if radius_x < 0 or radius_y < 0:
raise ValueError("""area_ellipse() only accepts non-negative values""" )
return pi * radius_x * radius_y
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError("""area_rhombus() only accepts non-negative values""" )
return 1 / 2 * diagonal_a * diagonal_a
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> float:
'''simple docstring'''
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or sides < 3:
raise ValueError(
"""area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides""" )
elif length < 0:
raise ValueError(
"""area_reg_polygon() only accepts non-negative values as \
length of a side""" )
return (sides * length**2) / (4 * tan(pi / sides ))
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(F"Rectangle: {area_rectangle(10, 20) = }")
print(F"Square: {area_square(10) = }")
print(F"Triangle: {area_triangle(10, 10) = }")
print(F"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(F"Parallelogram: {area_parallelogram(10, 20) = }")
print(F"Rhombus: {area_rhombus(10, 20) = }")
print(F"Trapezium: {area_trapezium(10, 20, 30) = }")
print(F"Circle: {area_circle(20) = }")
print(F"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(F"Cube: {surface_area_cube(20) = }")
print(F"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(F"Sphere: {surface_area_sphere(20) = }")
print(F"Hemisphere: {surface_area_hemisphere(20) = }")
print(F"Cone: {surface_area_cone(10, 20) = }")
print(F"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(F"Cylinder: {surface_area_cylinder(10, 20) = }")
print(F"Torus: {surface_area_torus(20, 10) = }")
print(F"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(F"Square: {area_reg_polygon(4, 10) = }")
print(F"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 136
| 1
|
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
| 360
|
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type="np"
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type="np"
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type="np"
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np"
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type="np"
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np"
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
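# The helper above resets CUDA's peak-memory counters so that each pipeline
# stage can be measured in isolation. A minimal sketch of the pattern (names
# are illustrative, not part of this test suite):
#
#   _start_torch_memory_measurement()
#   run_stage()
#   peak_bytes = torch.cuda.max_memory_allocated()  # peak since the last reset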
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
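# A minimal usage sketch (hypothetical values; anything not passed falls back
# to the defaults above):
#
#   config = TimeSeriesTransformerConfig(prediction_length=24, num_time_features=2)
#   # context_length defaults to prediction_length, and feature_size becomes
#   # input_size * len(lags_sequence) + config._number_of_features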
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")
def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch the authenticated GitHub user's profile via the REST API."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
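# A hedged usage sketch (the token below is a placeholder, not a real one, and
# the script filename is illustrative):
#
#   USER_TOKEN=ghp_your_token_here python fetch_github_info.py
#
# prints one "key: value" line per field of the authenticated user's profile,
# e.g. "login: octocat".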
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
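# A minimal usage sketch (values are illustrative; the matching model class is
# assumed to live in the sibling modeling module):
#
#   config = TrajectoryTransformerConfig(vocab_size=100, block_size=249)
#   # model = TrajectoryTransformerModel(config)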
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
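# With the lazy module in place, the heavy torch-dependent symbols are only
# imported when first accessed. A hedged usage sketch:
#
#   from transformers import LiltConfig, LiltModel  # resolved lazily
#   model = LiltModel(LiltConfig())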
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
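# Outside the test suite, the same benchmark machinery can be driven directly.
# A hedged minimal sketch (model id and sizes are illustrative):
#
#   args = PyTorchBenchmarkArguments(
#       models=["sshleifer/tiny-gpt2"], training=False, inference=True,
#       sequence_lengths=[8, 32], batch_sizes=[1, 2], multi_process=False,
#   )
#   results = PyTorchBenchmark(args).run()
#   print(results.time_inference_result)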
"""Matching-based 2-approximation for minimum vertex cover."""


def matching_min_vertex_cover(graph: dict) -> set:
    """Greedy 2-approximation: repeatedly take an uncovered edge and add both endpoints."""
    chosen_vertices = set()
    edges = get_edges(graph)
    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both extremities to chosen_vertices, and then
    # remove every edge incident to from_node or to_node.
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return the set of (from_node, to_node) pairs present in ``graph``."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Per-resource totals currently allocated across all processes."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self):
        """Available resources: the claim vector minus what is allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each need vector to its original process index."""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        """Run the safety algorithm, executing processes while a safe order exists."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Print the allocation and maximum-claim tables plus resource usage."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
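    # A hedged example run using the module-level tables defined above; any
    # truthy keyword (here `describe=True`) also prints the pretty tables.
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)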
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
        features = {
            "input_ids": tf.convert_to_tensor([[0, 2_646, 10_269, 83, 99_942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }
        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
def bead_sort(sequence: list) -> list:
    """Bead (gravity) sort for non-negative integers.

    >>> bead_sort([6, 11, 12, 4, 1, 5])
    [1, 4, 5, 6, 11, 12]
    """
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
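    # Bead sort needs O(n^2) adjacent passes in this formulation and only works
    # for non-negative integers; a quick extra check:
    assert bead_sort([5, 0, 4, 3]) == [0, 3, 4, 5]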
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
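# A hedged usage sketch once the lazy module is wired up (the checkpoint id is
# the published BAAI release; treat the exact call signature as an assumption):
#
#   from transformers import AltCLIPModel, AltCLIPProcessor
#   model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")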
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")
        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")
        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")
        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)
        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )
        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )
        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt"
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )
    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt"
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf"
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
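    # For reference, a hedged sketch of plain pipeline usage outside the tests
    # (labels and text are illustrative):
    #
    #   classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
    #   result = classifier("I love hiking", candidate_labels=["outdoors", "cooking"])
    #   # result["scores"] sums to 1.0 unless multi_label=True is passed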
'''simple docstring'''
def selection_sort(collection: list) -> list:
    """In-place selection sort; returns the same list sorted ascending.

    >>> selection_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
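    # Selection sort performs O(n^2) comparisons but only O(n) swaps, which can
    # matter when element writes are expensive. Quick sanity check:
    assert selection_sort([3, 1, 2]) == [1, 2, 3]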
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'''\
name: \"\"
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Dataset Card for X\" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Table of Contents\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Dataset Description\"
allow_empty: false
allow_empty_text: false
subsections:
- name: \"Dataset Summary\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Supported Tasks and Leaderboards\"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
CORRECT_DICT = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
README_CORRECT = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
README_CORRECT_FOUR_LEVEL = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
CORRECT_DICT_FOUR_LEVEL = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Extra Ignored Subsection""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
}
],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
README_EMPTY_YAML = """\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_EMPTY_YAML = (
    """The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."""
)
README_NO_YAML = """\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_NO_YAML = (
    """The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."""
)
README_INCORRECT_YAML = """\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_INCORRECT_YAML = """The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."""
README_MISSING_TEXT = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_MISSING_TEXT = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."""
README_NONE_SUBSECTION = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
"""
EXPECTED_ERROR_README_NONE_SUBSECTION = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."""
README_MISSING_SUBSECTION = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
"""
EXPECTED_ERROR_README_MISSING_SUBSECTION = """The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."""
README_MISSING_CONTENT = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
"""
EXPECTED_ERROR_README_MISSING_CONTENT = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."""
README_MISSING_FIRST_LEVEL = """\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."""
README_MULTIPLE_WRONG_FIRST_LEVEL = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
"""
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = """The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."""
README_WRONG_FIRST_LEVEL = """\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = """The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."""
README_EMPTY = ""
EXPECTED_ERROR_README_EMPTY = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."""
README_MULTIPLE_SAME_HEADING_1 = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = """The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."""
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
"""readme_md,""" , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_validation_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 70
|
"""Constant stretch (histogram equalisation) of a grayscale image."""
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        # read the image as grayscale and keep an untouched copy for display
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        # build the intensity mapping from the cumulative distribution
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        # apply the mapping pixel by pixel
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5_000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
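For reference, the per-intensity mapping the class computes (a scaled cumulative histogram) can be written as a short standalone NumPy sketch; this is an illustration and not part of the original module, and the helper name is invented:

def equalize_gray(image):
    # Histogram equalisation of an 8-bit grayscale array: send each intensity
    # level through the scaled cumulative distribution of pixel intensities.
    hist = np.bincount(image.ravel(), minlength=256)
    cdf = hist.cumsum() / hist.sum()
    mapping = np.round(255 * cdf).astype(np.uint8)
    return mapping[image]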
| 75
| 0
|
"""simple docstring"""
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce the amount of console output from TF
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 359
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> np.ndarray:
# prepare kernel
# the kernel size have to be odd
if (ksize % 2) == 0:
lowerCamelCase = ksize + 1
lowerCamelCase = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(snake_case__ ):
for x in range(snake_case__ ):
# distance from center
lowerCamelCase = x - ksize // 2
lowerCamelCase = y - ksize // 2
# degree to radiant
lowerCamelCase = theta / 1_80 * np.pi
lowerCamelCase = np.cos(_theta )
lowerCamelCase = np.sin(_theta )
# get kernel x
lowerCamelCase = cos_theta * px + sin_theta * py
# get kernel y
lowerCamelCase = -sin_theta * px + cos_theta * py
# fill kernel
lowerCamelCase = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
lowerCAmelCase : Optional[Any] = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
lowerCAmelCase : Any = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
lowerCAmelCase : Optional[Any] = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
lowerCAmelCase : Tuple = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
lowerCAmelCase : Optional[int] = out / out.max() * 255
lowerCAmelCase : Tuple = out.astype(np.uinta)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
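    # Hedged sanity check (not in the original script): even kernel sizes are bumped
    # to the next odd number, so requesting ksize=10 actually yields an 11x11 kernel.
    print(gabor_filter_kernel(10, 8, 0, 10, 0, 0).shape)  # (11, 11)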
| 168
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt"
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json"
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json"
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" REALM tokenizer (backed by HuggingFace's *tokenizers* library), based on WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        r"""Encode a batch of candidate lists; every candidate is padded to ``max_length``."""
        # Always pad to a fixed length so the candidates can be stacked into one tensor.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
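For orientation, a minimal usage sketch of batch_encode_candidates, to be run outside this module (the checkpoint id is a real Hub name; the candidate strings are made up):

from transformers import RealmTokenizerFast

tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
# One list of candidate strings per example; every candidate is padded to `max_length`.
text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
batch = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt")
print(batch.input_ids.shape)  # (num_examples, num_candidates, max_length)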
| 5
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 5
| 1
|
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """
    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    # subset[i][j] is True if a subset of the first i values sums to j
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
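    # A minimal space-optimised sketch of the same DP (an illustration, assuming
    # non-negative values): keep one boolean row and sweep sums from high to low
    # so that each element is used at most once.
    def is_sum_subset_1d(arr: list[int], required_sum: int) -> bool:
        reachable = [False] * (required_sum + 1)
        reachable[0] = True  # the empty subset always sums to 0
        for value in arr:
            for j in range(required_sum, value - 1, -1):
                reachable[j] = reachable[j] or reachable[j - value]
        return reachable[required_sum]

    print(is_sum_subset_1d([2, 4, 6, 8], 14))  # True, matching the doctest above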
| 365
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})
    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 94
| 0
|
"""Project Euler problem 72: count the reduced proper fractions n/d with d <= limit."""


def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(d) for d = 2..limit via a sieve."""
    # Sieve of Eratosthenes over the odd numbers (plus 2)
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(f'{solution() = }')
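    # Hand check (not from the original file): for limit = 8 the count is
    # phi(2)+...+phi(8) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21, the worked example
    # of Project Euler problem 72.
    print(f"{solution(8) = }")  # expected: 21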
| 67
|
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
lowerCamelCase_ = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
lowerCamelCase_ = Accelerator(kwargs_handlers=[ddp_scaler])
lowerCamelCase_ = torch.nn.Linear(1_00, 2_00)
lowerCamelCase_ = accelerator.prepare(model)
# Check the values changed in kwargs
lowerCamelCase_ = ''''''
lowerCamelCase_ = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 79
| 0
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """
    Return the minimum number of moves needed so every node holds exactly one
    coin, where a move shifts one coin along one edge.

    >>> distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0)))
    2
    >>> distribute_coins(None)
    0
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
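    # Worked example (an illustration, not part of the original tests): a root
    # holding 3 coins with two empty children sends one coin down each edge,
    # i.e. 2 moves in total.
    print(distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))))  # expected: 2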
| 59
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    r"""Pipeline for text-to-image generation using VQ Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(self, vqvae: VQModel, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, transformer: Transformer2DModel, scheduler: VQDiffusionScheduler, learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings):
        super().__init__()
        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Zero out (in log space) the lowest probabilities so each column keeps `truncation_rate` total mass."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
| 59
| 1
|
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    """Parse a truthy/falsy command-line string into a bool."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Map a string representation back to the original choice value (e.g. for Enum choices)."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *, aliases: Union[str, List[str]] = None, help: str = None, default: Any = dataclasses.MISSING, default_factory: Callable[[], Any] = dataclasses.MISSING, metadata: dict = None, **kwargs,
) -> dataclasses.Field:
    """Helper to create a dataclass field with argparse metadata (aliases, help text) in one call."""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """`argparse.ArgumentParser` subclass that uses type hints on dataclasses to generate CLI arguments."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
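A minimal usage sketch (the dataclass below is hypothetical, invented for illustration):

from dataclasses import dataclass, field

@dataclass
class ExampleArguments:
    learning_rate: float = field(default=3e-5, metadata={"help": "Optimizer learning rate."})
    do_eval: bool = field(default=False, metadata={"help": "Whether to run evaluation."})

parser = HfArgumentParser(ExampleArguments)
# e.g. invoked as: python script.py --learning_rate 1e-4 --do_eval
(example_args,) = parser.parse_args_into_dataclasses()
print(example_args.learning_rate, example_args.do_eval)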
| 15
|
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric("mean_iou")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), \'per_category_accuracy\': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}\n'
_CITATION = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union(
    pred_label, label, num_labels, ignore_index: int, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False,
):
    """Calculate intersection and union areas for a single (prediction, ground truth) pair."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(
    results, gt_seg_maps, num_labels, ignore_index: int, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False,
):
    """Aggregate intersection and union areas over a list of (prediction, ground truth) pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(
    results, gt_seg_maps, num_labels, ignore_index: int, nan_to_num: Optional[int] = None, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False,
):
    """Compute mean IoU, mean accuracy and overall accuracy from the aggregated areas."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(self, predictions, references, num_labels: int, ignore_index: int, nan_to_num: Optional[int] = None, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
        iou_result = mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index, nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels,
        )
        return iou_result
| 15
| 1
|
"""Small device/display helpers for image-generation experiments."""
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module) -> None:
    """Disable gradient tracking for every parameter of `module`."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img) -> None:
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
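A short usage sketch of these helpers (the linear layer is a stand-in for a real model):

if __name__ == "__main__":
    device = get_device()
    model = torch.nn.Linear(4, 2).to(device)
    freeze_module(model)  # parameters of `model` no longer receive gradients
    print(get_timestamp(), "- using device:", device)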
| 371
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    r"""Constructs a CLIP image processor."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image , size=size['shortest_edge'] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Optional[ChannelDimension] = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name='size' , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 280
| 0
|
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch
    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
    )
default_cache_path = os.path.join(torch_cache_home, """transformers""")
CLOUDFRONT_DISTRIB_PREFIX = """https://cdn.huggingface.co"""
S3_BUCKET_PREFIX = """https://s3.amazonaws.com/models.huggingface.co/bert"""
PATH = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
CONFIG = os.path.join(PATH, """config.yaml""")
ATTRIBUTES = os.path.join(PATH, """attributes.txt""")
OBJECTS = os.path.join(PATH, """objects.txt""")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = """pytorch_model.bin"""
CONFIG_NAME = """config.yaml"""
def load_labels( objs=OBJECTS , attrs=ATTRIBUTES ):
    vg_classes = []
    with open(objs ) as f:
        for object in f.readlines():
            vg_classes.append(object.split(',' )[0].lower().strip() )
    vg_attrs = []
    with open(attrs ) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(',' )[0].lower().strip() )
    return vg_classes, vg_attrs
def load_checkpoint( ckp_path ):
    r = OrderedDict()
    with open(ckp_path , 'rb' ) as f:
        ckp = pkl.load(f )['model']
    for k in copy.deepcopy(list(ckp.keys() ) ):
        v = ckp.pop(k )
        if isinstance(v , np.ndarray ):
            v = torch.tensor(v )
        else:
            assert isinstance(v , torch.Tensor ), type(v )
        r[k] = v
    return r
class Config:
    """simple docstring"""
    _pointer = {}
    def __init__( self , dictionary , name = "root" , level=0):
        '''simple docstring'''
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v , dict):
                v = Config(v , name=k , level=level + 1)
            d[k] = v
            setattr(self , k , v)
        self._pointer = d
def __repr__( self) -> Optional[int]:
'''simple docstring'''
return str(list((self._pointer.keys())))
    def __setattr__( self , key , val):
        '''simple docstring'''
        self.__dict__[key] = val
        self.__dict__[key.split('.')[-1]] = val
        levels = key.split('.')
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self , l) and isinstance(getattr(self , l) , Config):
                    setattr(getattr(self , l) , '.'.join(levels[i:]) , val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
    def to_dict( self):
        '''simple docstring'''
        return self._pointer
    def dump_yaml( self , data , file_name):
        '''simple docstring'''
        with open(F'{file_name}' , 'w') as stream:
            dump(data , stream)
    def dump_json( self , data , file_name):
        '''simple docstring'''
        with open(F'{file_name}' , 'w') as stream:
            json.dump(data , stream)
@staticmethod
    def load_yaml( config):
        '''simple docstring'''
        with open(config) as stream:
            data = load(stream , Loader=Loader)
        return data
    def __str__( self):
        '''simple docstring'''
        t = ' '
        if self._name != "root":
            r = F'{t * (self._level-1)}{self._name}:\n'
        else:
            r = ''
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v , Config):
                r += F'{t * (self._level)}{v}\n'
                self._level += 1
            else:
                r += F'{t * (self._level)}{k}: {v} ({type(v).__name__})\n'
            self._level = level
        return r[:-1]
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs):
        '''simple docstring'''
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)
        return cls(config_dict)
@classmethod
    def get_config_dict( cls , pretrained_model_name_or_path , **kwargs):
        '''simple docstring'''
        cache_dir = kwargs.pop('cache_dir' , None)
        force_download = kwargs.pop('force_download' , False)
        resume_download = kwargs.pop('resume_download' , False)
        proxies = kwargs.pop('proxies' , None)
        local_files_only = kwargs.pop('local_files_only' , False)
        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path , CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path , filename=CONFIG_NAME , use_cdn=False)
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = 'Can\'t load config for'
            raise EnvironmentError(msg)
        if resolved_config_file == config_file:
            print('loading configuration file from path')
        else:
            print('loading configuration file cache')
        return Config.load_yaml(resolved_config_file), kwargs
def compare( in_tensor ):
    out_tensor = torch.load('dump.pt' , map_location=in_tensor.device )
    na = in_tensor.numpy()
    nb = out_tensor.numpy()[0]
    print(na.shape , na[0, 0, :5] )
    print(nb.shape , nb[0, 0, :5] )
    assert np.allclose(na , nb , rtol=0.01 , atol=0.1 ), (
        F'{sum([1 for x in np.isclose(na , nb , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'
        " element-wise mismatch"
    )
    raise Exception('tensors are all good' )
# Hugging face functions below
def is_remote_url( url_or_filename ):
    parsed = urlparse(url_or_filename )
    return parsed.scheme in ("http", "https")
def hf_bucket_url( model_id , filename , use_cdn=True ):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = '/' not in model_id
    if legacy_format:
        return F'{endpoint}/{model_id}-{filename}'
    else:
        return F'{endpoint}/{model_id}/{filename}'
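# e.g. hf_bucket_url('bert-base-uncased', 'config.yaml', use_cdn=False)
# -> 'https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.yaml'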
def http_get( url , temp_file , proxies=None , resume_size=0 , user_agent=None , ):
    ua = 'python/{}'.format(sys.version.split()[0] )
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__ )
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join('{}/{}'.format(k , v ) for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    headers = {'user-agent': ua}
    if resume_size > 0:
        headers['Range'] = 'bytes=%d-' % (resume_size,)
    response = requests.get(url , stream=True , proxies=proxies , headers=headers )
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get('Content-Length' )
    total = resume_size + int(content_length ) if content_length is not None else None
    progress = tqdm(
        unit='B' , unit_scale=True , total=total , initial=resume_size , desc='Downloading' , )
    for chunk in response.iter_content(chunk_size=1024 ):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk ) )
            temp_file.write(chunk )
    progress.close()
def get_from_cache( url , cache_dir=None , force_download=False , proxies=None , etag_timeout=10 , resume_download=False , user_agent=None , local_files_only=False , ):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    os.makedirs(cache_dir , exist_ok=True )
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url , allow_redirects=True , proxies=proxies , timeout=etag_timeout )
            if response.status_code == 200:
                etag = response.headers.get('ETag' )
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url , etag )
    # get cache path to put the file
    cache_path = os.path.join(cache_dir , filename )
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path ):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir ) , filename + '.*' )
                if not file.endswith('.json' ) and not file.endswith('.lock' )
            ]
            if len(matching_files ) > 0:
                return os.path.join(cache_dir , matching_files[-1] )
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        'Cannot find the requested files in the cached path and outgoing traffic has been'
                        ' disabled. To enable model look-ups and downloads online, set \'local_files_only\''
                        ' to False.' )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path ) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + '.lock'
    with FileLock(lock_path ):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path ) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + '.incomplete'
            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path , 'a+b' ) as f:
                    yield f
            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path ):
                resume_size = os.stat(incomplete_path ).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile , dir=cache_dir , delete=False )
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                '%s not found in cache or force_download set to True, downloading to %s' , url , temp_file.name , )
            http_get(
                url , temp_file , proxies=proxies , resume_size=resume_size , user_agent=user_agent , )
        os.replace(temp_file.name , cache_path )
        meta = {'url': url, 'etag': etag}
        meta_path = cache_path + '.json'
        with open(meta_path , 'w' ) as meta_file:
            json.dump(meta , meta_file )
    return cache_path
def url_to_filename( url , etag=None ):
    url_bytes = url.encode('utf-8' )
    url_hash = sha256(url_bytes )
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode('utf-8' )
        etag_hash = sha256(etag_bytes )
        filename += "." + etag_hash.hexdigest()
    if url.endswith('.h5' ):
        filename += ".h5"
    return filename
def cached_path( url_or_filename , cache_dir=None , force_download=False , proxies=None , resume_download=False , user_agent=None , extract_compressed_file=False , force_extract=False , local_files_only=False , ):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename , Path ):
        url_or_filename = str(url_or_filename )
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    if is_remote_url(url_or_filename ):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , user_agent=user_agent , local_files_only=local_files_only , )
    elif os.path.exists(url_or_filename ):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename ).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError('file {} not found'.format(url_or_filename ) )
    else:
        # Something unknown
        raise ValueError('unable to parse {} as a URL or as a local path'.format(url_or_filename ) )
    if extract_compressed_file:
        if not is_zipfile(output_path ) and not tarfile.is_tarfile(output_path ):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path )
        output_extract_dir_name = output_file.replace('.' , '-' ) + '-extracted'
        output_path_extracted = os.path.join(output_dir , output_extract_dir_name )
        if os.path.isdir(output_path_extracted ) and os.listdir(output_path_extracted ) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + '.lock'
        with FileLock(lock_path ):
            shutil.rmtree(output_path_extracted , ignore_errors=True )
            os.makedirs(output_path_extracted )
            if is_zipfile(output_path ):
                with ZipFile(output_path , 'r' ) as zip_file:
                    zip_file.extractall(output_path_extracted )
                    zip_file.close()
            elif tarfile.is_tarfile(output_path ):
                tar_file = tarfile.open(output_path )
                tar_file.extractall(output_path_extracted )
                tar_file.close()
            else:
                raise EnvironmentError('Archive format of {} could not be identified'.format(output_path ) )
        return output_path_extracted
    return output_path
def get_data( query , delim="," ):
    assert isinstance(query , str )
    if os.path.isfile(query ):
        with open(query ) as f:
            data = eval(f.read() )
    else:
        req = requests.get(query )
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data )
            except Exception:
                data = data.split('\n' )
        req.close()
    return data
def get_image_from_url( url ):
    response = requests.get(url )
    img = np.array(Image.open(BytesIO(response.content ) ) )
    return img
def load_frcnn_pkl_from_url( url ):
    fn = url.split('/' )[-1]
    if fn not in os.listdir(os.getcwd() ):
        wget.download(url )
    with open(fn , 'rb' ) as stream:
        weights = pkl.load(stream )
    model = weights.pop('model' )
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v )
        if "running_var" in k:
            zero = torch.tensor([0] )
            k2 = k.replace('running_var' , 'num_batches_tracked' )
            new[k2] = zero
    return new
def get_demo_path( ):
    print(F'{os.path.abspath(os.path.join(PATH , os.pardir ) )}/demo.ipynb' )
def img_tensorize( im , input_format="RGB" ):
    assert isinstance(im , str )
    if os.path.isfile(im ):
        img = cv2.imread(im )
    else:
        img = get_image_from_url(im )
        assert img is not None, F'could not connect to: {im}'
    img = cv2.cvtColor(img , cv2.COLOR_BGR2RGB )
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk( images , batch=1 ):
    return (images[i : i + batch] for i in range(0 , len(images ) , batch ))
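# e.g. list(chunk([img_a, img_b, img_c], batch=2)) -> [[img_a, img_b], [img_c]]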
| 99
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
lowercase : List[str] = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments( BenchmarkArguments ):
"""simple docstring"""
    deprecated_args = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
    def __init__( self , **kwargs):
        '''simple docstring'''
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self , positive_arg , not kwargs.pop(deprecated_arg))
                logger.warning(
                    F'{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'
                    F' {positive_arg}={kwargs[positive_arg]}')
        self.torchscript = kwargs.pop('torchscript' , self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop('torch_xla_tpu_print_metrics' , self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop('fp16_opt_level' , self.fp16_opt_level)
        super().__init__(**kwargs)
    torchscript : bool = field(default=False , metadata={'''help''': '''Trace the models using torchscript'''} )
    torch_xla_tpu_print_metrics : bool = field(default=False , metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''} )
    fp16_opt_level : str = field(
        default='''O1''' , metadata={
            '''help''': (
                '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '''
                '''See details at https://nvidia.github.io/apex/amp.html'''
            )
        } , )
    @cached_property
    def _setup_devices( self) -> Tuple["torch.device", int]:
        '''simple docstring'''
        requires_backends(self , ['torch'])
        logger.info('PyTorch: setting up devices')
        if not self.cuda:
            device = torch.device('cpu')
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
    @property
    def is_tpu( self) -> bool:
        '''simple docstring'''
        return is_torch_tpu_available() and self.tpu
    @property
    def device_idx( self) -> int:
        '''simple docstring'''
        requires_backends(self , ['torch'])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()
    @property
    def device( self) -> "torch.device":
        '''simple docstring'''
        requires_backends(self , ['torch'])
        return self._setup_devices[0]
    @property
    def n_gpu( self) -> int:
        '''simple docstring'''
        requires_backends(self , ['torch'])
        return self._setup_devices[1]
    @property
    def is_gpu( self) -> bool:
        '''simple docstring'''
        return self.n_gpu > 0
| 99
| 1
|
'''simple docstring'''
import re
def split_input( str_: str ):
    """simple docstring"""
    return [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]' ,str_ )]
def to_simple_case( str_: str ):
    """simple docstring"""
    string_split = split_input(str_ )
    return "".join(
        [''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def to_complex_case( text: str ,upper: bool ,separator: str ):
    """simple docstring"""
    try:
        string_split = split_input(text )
        if upper:
            res_str = ''.join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            res_str = ''.join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"
def to_pascal_case( text: str ):
    """simple docstring"""
    return to_simple_case(text )
def to_camel_case( text: str ):
    """simple docstring"""
    try:
        res_str = to_simple_case(text )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def to_snake_case( text: str ,upper: bool ):
    """simple docstring"""
    return to_complex_case(text ,upper ,'_' )
def to_kebab_case( text: str ,upper: bool ):
    """simple docstring"""
    return to_complex_case(text ,upper ,'-' )
if __name__ == "__main__":
__import__("doctest").testmod()
| 246
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class Node:
    '''simple docstring'''
    def __init__( self, value ):
        '''simple docstring'''
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class BinaryTreeNodeSum:
    '''simple docstring'''
    def __init__( self, tree ):
        '''simple docstring'''
        self.tree = tree
    def depth_first_search( self, node ):
        '''simple docstring'''
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )
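    # Iterating yields a single value: the recursive sum over every node in the tree.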
def __iter__( self ):
'''simple docstring'''
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 246
| 1
|
from math import isqrt
def is_prime( number: int ) -> bool:
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number ) + 1 ) )
def solution( max_prime: int = 1_0**6 ) -> int:
    primes_count = 0
    cube_index = 1
    # Candidates are differences of consecutive cubes: (k + 1)**3 - k**3 = 3k**2 + 3k + 1.
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 73
|
'''simple docstring'''
import math
def main() -> None:
    '''simple docstring'''
    message = input("Enter message: " )
    key = int(input(f"""Enter key [2-{len(message ) - 1}]: """ ) )
    mode = input("Encryption/Decryption [e/d]: " )
    if mode.lower().startswith("e" ):
        text = encrypt_message(key , message )
    elif mode.lower().startswith("d" ):
        text = decrypt_message(key , message )
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"""Output:\n{text + "|"}""" )
def encrypt_message( key: int , message: str ) -> str:
    '''simple docstring'''
    cipher_text = [""] * key
    for col in range(key ):
        pointer = col
        while pointer < len(message ):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text )
def decrypt_message( key: int , message: str ) -> str:
    '''simple docstring'''
    num_cols = math.ceil(len(message ) / key )
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message )
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 1
| 0
|
'''simple docstring'''
import os
import pytest
from attr import dataclass
_snake_case : Tuple = 'us-east-1' # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
    hyperparameters = {
        'task_name': 'mnli',
        'per_device_train_batch_size': 16,
        'per_device_eval_batch_size': 16,
        'do_train': True,
        'do_eval': True,
        'do_predict': True,
        'output_dir': '/opt/ml/model',
        'overwrite_output_dir': True,
        'max_steps': 500,
        'save_steps': 5500,
    }
    distributed_hyperparameters = {**hyperparameters, 'max_steps': 1000}
@property
    def metric_definitions( self : Optional[int] ) -> str:
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name( self : Optional[Any] ) -> str:
"""simple docstring"""
return F'{self.framework}-transfromers-test'
@property
    def test_path( self : Union[str, Any] ) -> str:
"""simple docstring"""
return F'./tests/sagemaker/scripts/{self.framework}'
@property
    def image_uri( self : int ) -> str:
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='''class''' )
def sm_env(request : Optional[int] ):
    '''simple docstring'''
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
| 179
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
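# Lazy import structure: each try/except block below registers the optional
# sentencepiece/tokenizers/torch/TF modules only when that backend is installed.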
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 179
| 1
|
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest( SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (('''num_inference_steps''', 50),)
    def get_scheduler_config( self : List[str] , **kwargs : str ):
        config = {
            """num_train_timesteps""": 1_000,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
        }
        config.update(**kwargs )
        return config
    def check_over_configs( self : Any , time_step : Any=0 , **config : Any ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained( self : Dict ):
        pass
    def check_over_forward( self : Optional[Any] , time_step : Optional[Any]=0 , **forward_kwargs : int ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self : Dict , **config : List[Any] ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.prk_timesteps ):
            residual = model(sample , t )
            sample = scheduler.step_prk(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps ):
            residual = model(sample , t )
            sample = scheduler.step_plms(residual , t , sample ).prev_sample
        return sample
    def test_step_shape( self : Dict ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , 'set_timesteps' ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , 'set_timesteps' ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            output_0 = scheduler.step_prk(residual , 0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step_prk(residual , 1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
            output_0 = scheduler.step_plms(residual , 0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step_plms(residual , 1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps( self : int ):
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_steps_offset( self : List[Any] ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(10 )
        assert torch.equal(
            scheduler.timesteps , torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
    def test_betas( self : Optional[Any] ):
        for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self : Union[str, Any] ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self : Tuple ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self : List[Any] ):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t )
    def test_inference_steps( self : Tuple ):
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=num_inference_steps )
    def test_pow_of_3_inference_steps( self : Union[str, Any] ):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27
        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                sample = scheduler.step_prk(residual , t , sample ).prev_sample
    def test_inference_plms_no_past_residuals( self : Optional[int] ):
        with self.assertRaises(ValueError ):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
    def test_full_loop_no_noise( self : int ):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 198.1_318 ) < 1E-2
        assert abs(result_mean.item() - 0.2580 ) < 1E-3
    def test_full_loop_with_v_prediction( self : List[str] ):
        sample = self.full_loop(prediction_type='v_prediction' )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 67.3_986 ) < 1E-2
        assert abs(result_mean.item() - 0.0878 ) < 1E-3
    def test_full_loop_with_set_alpha_to_one( self : Optional[Any] ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 230.0_399 ) < 1E-2
        assert abs(result_mean.item() - 0.2995 ) < 1E-3
    def test_full_loop_with_no_set_alpha_to_one( self : Union[str, Any] ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 186.9_482 ) < 1E-2
        assert abs(result_mean.item() - 0.2434 ) < 1E-3
| 315
|
import d4rl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
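# Sampling/guidance settings for the value-guided planner; only planning_horizon
# is passed explicitly to the pipeline call below.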
if __name__ == "__main__":
    env_name = 'hopper-medium-v2'
    env = gym.make(env_name)
    pipeline = ValueGuidedRLPipeline.from_pretrained(
'bglick13/hopper-medium-v2-value-function-hor32',
env=env,
)
env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
# execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
f''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(f'''Total reward: {total_reward}''')
| 192
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
__UpperCAmelCase : Tuple = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig( PretrainedConfig):
    '''simple docstring'''
    model_type = "markuplm"
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=256 , max_xpath_subs_unit_embeddings=1_024 , tag_pad_id=216 , subs_pad_id=1_001 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 315
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
__UpperCAmelCase : Union[str, Any] = logging.getLogger()
def get_setup_file():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f
class DeeBertTests( TestCasePlus):
    '''simple docstring'''
    def setup( self ):
        """simple docstring"""
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
    def run_and_check( self , args ):
        """simple docstring"""
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 , '''run_glue_deebert.py''' )
            with patch.object(sys , '''argv''' , args ):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value , 0.666 )
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train( self ):
        """simple docstring"""
        train_args = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
'''.split()
        self.run_and_check(train_args )
        eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(eval_args )
        entropy_eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(entropy_eval_args )
| 315
| 1
|
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort( sequence ) -> list:
    """simple docstring"""
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[high], sequence[mid] = sequence[mid], sequence[high]
            high -= 1
        else:
            msg = F'''The elements inside the sequence must contain only {colors} values'''
            raise ValueError(msg )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by commas:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
print(f'''{dutch_national_flag_sort(unsorted)}''')
| 15
|
import math
def insertion_sort( array , start = 0 , end = 0 ) -> list:
    """simple docstring"""
    end = end or len(array )
    for i in range(start , end ):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify( array , index , heap_size ) -> None:  # Max Heap
    """simple docstring"""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array , largest , heap_size )
def heap_sort( array ) -> list:
    """simple docstring"""
    n = len(array )
    for i in range(n // 2 , -1 , -1 ):
        heapify(array , i , n )
    for i in range(n - 1 , 0 , -1 ):
        array[i], array[0] = array[0], array[i]
        heapify(array , 0 , i )
    return array
def median_of_3( array , first_index , middle_index , last_index ) -> int:
    """simple docstring"""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition( array , low , high , pivot ) -> int:
    """simple docstring"""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort( array ) -> list:
    """simple docstring"""
    if len(array ) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array ) ) )
    size_threshold = 16
    return intro_sort(array , 0 , len(array ) , size_threshold , max_depth )
def intro_sort( array , start , end , size_threshold , max_depth ) -> list:
    """simple docstring"""
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array )
        max_depth -= 1
        pivot = median_of_3(array , start , start + ((end - start) // 2) + 1 , end - 1 )
        p = partition(array , start , end , pivot )
        intro_sort(array , p , end , size_threshold , max_depth )
        end = p
    return insertion_sort(array , start , end )
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma : ').strip()
    unsorted = [float(item) for item in user_input.split(',')]
print(sort(unsorted))
| 15
| 1
|
"""simple docstring"""
def solution( pence : int = 2_0_0 ) -> int:
    '''simple docstring'''
    coins = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    # Unbounded-knapsack DP; e.g. solution(5) == 4: {5}, {2,2,1}, {2,1,1,1}, {1,1,1,1,1}.
    for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 7_3682
| 352
|
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    def test_transform_and_reverse( self : int ):
        '''simple docstring'''
        model_id = """hf-internal-testing/tiny-random-t5"""
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id )
        inp = tokenizer("""This is me""" , return_tensors="""pt""" )
        model = model.to_bettertransformer()
        self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        output = model.generate(**inp )
        model = model.reverse_bettertransformer()
        self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname )
            self.assertFalse(
                any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
            output_from_pretrained = model_reloaded.generate(**inp )
            self.assertTrue(torch.allclose(output , output_from_pretrained ) )
    def test_error_save_pretrained( self : Any ):
        '''simple docstring'''
        model_id = """hf-internal-testing/tiny-random-t5"""
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id )
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError ):
                model.save_pretrained(tmpdirname )
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname )
| 320
| 0
|