code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
a : List[str] = ""
a : Tuple = ""
a : Dict = ""
a : List[str] = 1 # (0 is vertical, 1 is horizontal)
def __UpperCAmelCase ( ) -> None:
__snake_case , __snake_case = get_dataset(__lowerCamelCase , __lowerCamelCase )
print("Processing..." )
__snake_case , __snake_case , __snake_case = update_image_and_anno(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for index, image in enumerate(__lowerCamelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__snake_case = random_chars(32 )
__snake_case = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0]
__snake_case = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
cva.imwrite(F'''/{file_root}.jpg''' , __lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Success {index+1}/{len(__lowerCamelCase )} with {file_name}''' )
__snake_case = []
for anno in new_annos[index]:
__snake_case = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
annos_list.append(__lowerCamelCase )
with open(F'''/{file_root}.txt''' , "w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def __UpperCAmelCase ( _UpperCAmelCase : Any , _UpperCAmelCase : Tuple ) -> tuple[list, list]:
__snake_case = []
__snake_case = []
for label_file in glob.glob(os.path.join(__lowerCamelCase , "*.txt" ) ):
__snake_case = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
with open(__lowerCamelCase ) as in_file:
__snake_case = in_file.readlines()
__snake_case = os.path.join(__lowerCamelCase , F'''{label_name}.jpg''' )
__snake_case = []
for obj_list in obj_lists:
__snake_case = obj_list.rstrip("\n" ).split(" " )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__lowerCamelCase )
labels.append(__lowerCamelCase )
return img_paths, labels
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : List[str] = 1 ) -> tuple[list, list, list]:
__snake_case = []
__snake_case = []
__snake_case = []
for idx in range(len(__lowerCamelCase ) ):
__snake_case = []
__snake_case = img_list[idx]
path_list.append(__lowerCamelCase )
__snake_case = anno_list[idx]
__snake_case = cva.imread(__lowerCamelCase )
if flip_type == 1:
__snake_case = cva.flip(__lowerCamelCase , __lowerCamelCase )
for bbox in img_annos:
__snake_case = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__snake_case = cva.flip(__lowerCamelCase , __lowerCamelCase )
for bbox in img_annos:
__snake_case = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__lowerCamelCase )
new_imgs_list.append(__lowerCamelCase )
return new_imgs_list, new_annos_lists, path_list
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] = 32 ) -> str:
assert number_char > 1, "The number of character should greater than 1"
__snake_case = ascii_lowercase + digits
return "".join(random.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 69 |
import os
def __A ( ) -> Dict:
with open(os.path.dirname(__lowerCamelCase ) + """/p022_names.txt""" ) as file:
a = str(file.readlines()[0] )
a = names.replace("""\"""" , """""" ).split(""",""" )
names.sort()
a = 0
a = 0
for i, name in enumerate(__lowerCamelCase ):
for letter in name:
name_score += ord(__lowerCamelCase ) - 64
total_score += (i + 1) * name_score
a = 0
return total_score
if __name__ == "__main__":
print(solution())
| 468 | 0 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
a_ : Any = """CompVis/stable-diffusion-v1-1"""
a_ : Optional[Any] = """CompVis/stable-diffusion-v1-2"""
a_ : Tuple = """CompVis/stable-diffusion-v1-3"""
a_ : Optional[int] = """CompVis/stable-diffusion-v1-4"""
class snake_case ( lowercase ):
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = True , ):
"""simple docstring"""
super()._init_()
lowerCamelCase_ = StableDiffusionPipeline.from_pretrained(UpperCamelCase )
lowerCamelCase_ = StableDiffusionPipeline.from_pretrained(UpperCamelCase )
lowerCamelCase_ = StableDiffusionPipeline.from_pretrained(UpperCamelCase )
lowerCamelCase_ = StableDiffusionPipeline(
vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , unet=UpperCamelCase , scheduler=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=UpperCamelCase , requires_safety_checker=UpperCamelCase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def snake_case ( self ):
"""simple docstring"""
return {k: getattr(self , UpperCamelCase ) for k in self.config.keys() if not k.startswith("_" )}
def snake_case ( self , UpperCamelCase = "auto" ):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
self.enable_attention_slicing(UpperCamelCase )
@torch.no_grad()
def snake_case ( self , UpperCamelCase , UpperCamelCase = 512 , UpperCamelCase = 512 , UpperCamelCase = 50 , UpperCamelCase = 7.5 , UpperCamelCase = None , UpperCamelCase = 1 , UpperCamelCase = 0.0 , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = "pil" , UpperCamelCase = True , UpperCamelCase = None , UpperCamelCase = 1 , **UpperCamelCase , ):
"""simple docstring"""
return self.pipea(
prompt=UpperCamelCase , height=UpperCamelCase , width=UpperCamelCase , num_inference_steps=UpperCamelCase , guidance_scale=UpperCamelCase , negative_prompt=UpperCamelCase , num_images_per_prompt=UpperCamelCase , eta=UpperCamelCase , generator=UpperCamelCase , latents=UpperCamelCase , output_type=UpperCamelCase , return_dict=UpperCamelCase , callback=UpperCamelCase , callback_steps=UpperCamelCase , **UpperCamelCase , )
@torch.no_grad()
def snake_case ( self , UpperCamelCase , UpperCamelCase = 512 , UpperCamelCase = 512 , UpperCamelCase = 50 , UpperCamelCase = 7.5 , UpperCamelCase = None , UpperCamelCase = 1 , UpperCamelCase = 0.0 , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = "pil" , UpperCamelCase = True , UpperCamelCase = None , UpperCamelCase = 1 , **UpperCamelCase , ):
"""simple docstring"""
return self.pipea(
prompt=UpperCamelCase , height=UpperCamelCase , width=UpperCamelCase , num_inference_steps=UpperCamelCase , guidance_scale=UpperCamelCase , negative_prompt=UpperCamelCase , num_images_per_prompt=UpperCamelCase , eta=UpperCamelCase , generator=UpperCamelCase , latents=UpperCamelCase , output_type=UpperCamelCase , return_dict=UpperCamelCase , callback=UpperCamelCase , callback_steps=UpperCamelCase , **UpperCamelCase , )
@torch.no_grad()
def snake_case ( self , UpperCamelCase , UpperCamelCase = 512 , UpperCamelCase = 512 , UpperCamelCase = 50 , UpperCamelCase = 7.5 , UpperCamelCase = None , UpperCamelCase = 1 , UpperCamelCase = 0.0 , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = "pil" , UpperCamelCase = True , UpperCamelCase = None , UpperCamelCase = 1 , **UpperCamelCase , ):
"""simple docstring"""
return self.pipea(
prompt=UpperCamelCase , height=UpperCamelCase , width=UpperCamelCase , num_inference_steps=UpperCamelCase , guidance_scale=UpperCamelCase , negative_prompt=UpperCamelCase , num_images_per_prompt=UpperCamelCase , eta=UpperCamelCase , generator=UpperCamelCase , latents=UpperCamelCase , output_type=UpperCamelCase , return_dict=UpperCamelCase , callback=UpperCamelCase , callback_steps=UpperCamelCase , **UpperCamelCase , )
@torch.no_grad()
def snake_case ( self , UpperCamelCase , UpperCamelCase = 512 , UpperCamelCase = 512 , UpperCamelCase = 50 , UpperCamelCase = 7.5 , UpperCamelCase = None , UpperCamelCase = 1 , UpperCamelCase = 0.0 , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = "pil" , UpperCamelCase = True , UpperCamelCase = None , UpperCamelCase = 1 , **UpperCamelCase , ):
"""simple docstring"""
return self.pipea(
prompt=UpperCamelCase , height=UpperCamelCase , width=UpperCamelCase , num_inference_steps=UpperCamelCase , guidance_scale=UpperCamelCase , negative_prompt=UpperCamelCase , num_images_per_prompt=UpperCamelCase , eta=UpperCamelCase , generator=UpperCamelCase , latents=UpperCamelCase , output_type=UpperCamelCase , return_dict=UpperCamelCase , callback=UpperCamelCase , callback_steps=UpperCamelCase , **UpperCamelCase , )
@torch.no_grad()
def snake_case ( self , UpperCamelCase , UpperCamelCase = 512 , UpperCamelCase = 512 , UpperCamelCase = 50 , UpperCamelCase = 7.5 , UpperCamelCase = None , UpperCamelCase = 1 , UpperCamelCase = 0.0 , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = "pil" , UpperCamelCase = True , UpperCamelCase = None , UpperCamelCase = 1 , **UpperCamelCase , ):
"""simple docstring"""
lowerCamelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
self.to(UpperCamelCase )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
# Get first result from Stable Diffusion Checkpoint v1.1
lowerCamelCase_ = self.textaimg_sda_a(
prompt=UpperCamelCase , height=UpperCamelCase , width=UpperCamelCase , num_inference_steps=UpperCamelCase , guidance_scale=UpperCamelCase , negative_prompt=UpperCamelCase , num_images_per_prompt=UpperCamelCase , eta=UpperCamelCase , generator=UpperCamelCase , latents=UpperCamelCase , output_type=UpperCamelCase , return_dict=UpperCamelCase , callback=UpperCamelCase , callback_steps=UpperCamelCase , **UpperCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.2
lowerCamelCase_ = self.textaimg_sda_a(
prompt=UpperCamelCase , height=UpperCamelCase , width=UpperCamelCase , num_inference_steps=UpperCamelCase , guidance_scale=UpperCamelCase , negative_prompt=UpperCamelCase , num_images_per_prompt=UpperCamelCase , eta=UpperCamelCase , generator=UpperCamelCase , latents=UpperCamelCase , output_type=UpperCamelCase , return_dict=UpperCamelCase , callback=UpperCamelCase , callback_steps=UpperCamelCase , **UpperCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.3
lowerCamelCase_ = self.textaimg_sda_a(
prompt=UpperCamelCase , height=UpperCamelCase , width=UpperCamelCase , num_inference_steps=UpperCamelCase , guidance_scale=UpperCamelCase , negative_prompt=UpperCamelCase , num_images_per_prompt=UpperCamelCase , eta=UpperCamelCase , generator=UpperCamelCase , latents=UpperCamelCase , output_type=UpperCamelCase , return_dict=UpperCamelCase , callback=UpperCamelCase , callback_steps=UpperCamelCase , **UpperCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.4
lowerCamelCase_ = self.textaimg_sda_a(
prompt=UpperCamelCase , height=UpperCamelCase , width=UpperCamelCase , num_inference_steps=UpperCamelCase , guidance_scale=UpperCamelCase , negative_prompt=UpperCamelCase , num_images_per_prompt=UpperCamelCase , eta=UpperCamelCase , generator=UpperCamelCase , latents=UpperCamelCase , output_type=UpperCamelCase , return_dict=UpperCamelCase , callback=UpperCamelCase , callback_steps=UpperCamelCase , **UpperCamelCase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 445 |
'''simple docstring'''
def __snake_case ( UpperCAmelCase_ : int = 100 ):
lowerCamelCase_ = n * (n + 1) * (2 * n + 1) / 6
lowerCamelCase_ = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 445 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : List[Any] =logging.get_logger(__name__)
A_ : Union[str, Any] ={'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class __UpperCAmelCase ( __a ):
__A : Dict = 'openai-gpt'
__A : Optional[int] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _lowerCamelCase=4_0478 , _lowerCamelCase=512 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=1E-5 , _lowerCamelCase=0.02 , _lowerCamelCase="cls_index" , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=0.1 , **_lowerCamelCase , ):
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = n_positions
lowerCAmelCase_ = n_embd
lowerCAmelCase_ = n_layer
lowerCAmelCase_ = n_head
lowerCAmelCase_ = afn
lowerCAmelCase_ = resid_pdrop
lowerCAmelCase_ = embd_pdrop
lowerCAmelCase_ = attn_pdrop
lowerCAmelCase_ = layer_norm_epsilon
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = summary_type
lowerCAmelCase_ = summary_use_proj
lowerCAmelCase_ = summary_activation
lowerCAmelCase_ = summary_first_dropout
lowerCAmelCase_ = summary_proj_to_labels
super().__init__(**_lowerCamelCase )
| 274 | '''simple docstring'''
from __future__ import annotations
def snake_case_ ( __snake_case : list[int | str]) -> None:
create_state_space_tree(__snake_case , [] , 0 , [0 for i in range(len(__snake_case))])
def snake_case_ ( __snake_case : list[int | str] , __snake_case : list[int | str] , __snake_case : int , __snake_case : list[int] , ) -> None:
if index == len(__snake_case):
print(__snake_case)
return
for i in range(len(__snake_case)):
if not index_used[i]:
current_sequence.append(sequence[i])
lowerCAmelCase_ = True
create_state_space_tree(__snake_case , __snake_case , index + 1 , __snake_case)
current_sequence.pop()
lowerCAmelCase_ = False
A_ : list[int | str] =[3, 1, 2, 4]
generate_all_permutations(sequence)
A_ : list[int | str] =["A", "B", "C"]
generate_all_permutations(sequence_a)
| 274 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def lowercase_ ( __UpperCAmelCase ) -> List[Any]:
lowerCAmelCase__ : Tuple = DPTConfig(embedding_type="""hybrid""" )
if "large" in checkpoint_url:
lowerCAmelCase__ : List[Any] = 1024
lowerCAmelCase__ : Union[str, Any] = 4096
lowerCAmelCase__ : List[Any] = 24
lowerCAmelCase__ : List[str] = 16
lowerCAmelCase__ : Union[str, Any] = [5, 11, 17, 23]
lowerCAmelCase__ : int = [256, 512, 1024, 1024]
lowerCAmelCase__ : Tuple = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
lowerCAmelCase__ : Dict = 768
lowerCAmelCase__ : List[Any] = [1, 1, 1, 0.5]
lowerCAmelCase__ : Optional[int] = [256, 512, 768, 768]
lowerCAmelCase__ : str = 150
lowerCAmelCase__ : List[Any] = 16
lowerCAmelCase__ : Optional[int] = (1, 384, 384)
lowerCAmelCase__ : int = False
lowerCAmelCase__ : Tuple = "project"
if "ade" in checkpoint_url:
lowerCAmelCase__ : Dict = True
lowerCAmelCase__ : List[str] = 768
lowerCAmelCase__ : Optional[int] = [1, 1, 1, 0.5]
lowerCAmelCase__ : Dict = 150
lowerCAmelCase__ : Dict = 16
lowerCAmelCase__ : str = "huggingface/label-files"
lowerCAmelCase__ : Optional[int] = "ade20k-id2label.json"
lowerCAmelCase__ : List[Any] = json.load(open(cached_download(hf_hub_url(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) )
lowerCAmelCase__ : str = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
lowerCAmelCase__ : Tuple = idalabel
lowerCAmelCase__ : Any = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ : List[Any] = [1, 150, 480, 480]
return config, expected_shape
def lowercase_ ( __UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : Union[str, Any] = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def lowercase_ ( __UpperCAmelCase ) -> Union[str, Any]:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowerCAmelCase__ : str = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
lowerCAmelCase__ : List[str] = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
lowerCAmelCase__ : str = name.replace("""patch_embed""" , """""" )
if "pos_embed" in name:
lowerCAmelCase__ : Optional[int] = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
lowerCAmelCase__ : Optional[Any] = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
lowerCAmelCase__ : Union[str, Any] = name.replace("""proj""" , """projection""" )
if "blocks" in name:
lowerCAmelCase__ : Dict = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
lowerCAmelCase__ : Optional[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCAmelCase__ : List[str] = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name and "backbone" not in name:
lowerCAmelCase__ : Union[str, Any] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name and "backbone" not in name:
lowerCAmelCase__ : Tuple = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
lowerCAmelCase__ : Any = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
lowerCAmelCase__ : Dict = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
lowerCAmelCase__ : List[Any] = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
lowerCAmelCase__ : str = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
lowerCAmelCase__ : List[Any] = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
lowerCAmelCase__ : int = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
lowerCAmelCase__ : Union[str, Any] = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowerCAmelCase__ : List[str] = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
lowerCAmelCase__ : Optional[Any] = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
lowerCAmelCase__ : List[Any] = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
lowerCAmelCase__ : Tuple = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
lowerCAmelCase__ : Union[str, Any] = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
lowerCAmelCase__ : Optional[int] = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowerCAmelCase__ : List[str] = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
lowerCAmelCase__ : int = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
lowerCAmelCase__ : Dict = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
lowerCAmelCase__ : int = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowerCAmelCase__ : Tuple = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
lowerCAmelCase__ : List[Any] = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
lowerCAmelCase__ : Any = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
lowerCAmelCase__ : str = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
lowerCAmelCase__ : Optional[int] = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
lowerCAmelCase__ : str = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
lowerCAmelCase__ : Optional[int] = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
lowerCAmelCase__ : List[Any] = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
lowerCAmelCase__ : Union[str, Any] = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
lowerCAmelCase__ : List[Any] = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
lowerCAmelCase__ : Optional[Any] = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
lowerCAmelCase__ : Union[str, Any] = name.replace("""auxlayer""" , """auxiliary_head.head""" )
if "backbone" in name:
lowerCAmelCase__ : int = name.replace("""backbone""" , """backbone.bit.encoder""" )
if ".." in name:
lowerCAmelCase__ : Dict = name.replace("""..""" , """.""" )
if "stem.conv" in name:
lowerCAmelCase__ : Optional[int] = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
lowerCAmelCase__ : Optional[Any] = name.replace("""blocks""" , """layers""" )
if "convolution" in name and "backbone" in name:
lowerCAmelCase__ : Optional[int] = name.replace("""convolution""" , """conv""" )
if "layer" in name and "backbone" in name:
lowerCAmelCase__ : Tuple = name.replace("""layer""" , """layers""" )
if "backbone.bit.encoder.bit" in name:
lowerCAmelCase__ : List[Any] = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" )
if "embedder.conv" in name:
lowerCAmelCase__ : List[str] = name.replace("""embedder.conv""" , """embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
lowerCAmelCase__ : int = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" )
return name
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> int:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase__ : Optional[int] = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
lowerCAmelCase__ : int = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ : Any = in_proj_weight[: config.hidden_size, :]
lowerCAmelCase__ : int = in_proj_bias[: config.hidden_size]
lowerCAmelCase__ : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase__ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase__ : Dict = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase__ : List[Any] = in_proj_bias[-config.hidden_size :]
def lowercase_ ( ) -> Optional[int]:
lowerCAmelCase__ : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase__ : List[str] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : List[str] = get_dpt_config(_lowerCAmelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
lowerCAmelCase__ : Optional[int] = torch.load(_lowerCAmelCase , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(_lowerCAmelCase )
# rename keys
for key in state_dict.copy().keys():
lowerCAmelCase__ : List[Any] = state_dict.pop(_lowerCAmelCase )
lowerCAmelCase__ : Union[str, Any] = val
# read in qkv matrices
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
# load HuggingFace model
lowerCAmelCase__ : Any = DPTForSemanticSegmentation(_lowerCAmelCase ) if "ade" in checkpoint_url else DPTForDepthEstimation(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
model.eval()
# Check outputs on an image
lowerCAmelCase__ : Dict = 480 if "ade" in checkpoint_url else 384
lowerCAmelCase__ : Union[str, Any] = DPTImageProcessor(size=_lowerCAmelCase )
lowerCAmelCase__ : str = prepare_img()
lowerCAmelCase__ : Any = image_processor(_lowerCAmelCase , return_tensors="""pt""" )
# forward pass
lowerCAmelCase__ : Dict = model(**_lowerCAmelCase ).logits if "ade" in checkpoint_url else model(**_lowerCAmelCase ).predicted_depth
if show_prediction:
lowerCAmelCase__ : Dict = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=_lowerCAmelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCAmelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you\'re pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
_A = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 711 |
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
_A = 6_378_137.0
_A = 6_356_752.314_245
_A = 6_3_7_8_1_3_7
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> float:
lowerCAmelCase__ : str = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
lowerCAmelCase__ : Optional[int] = atan((1 - flattening) * tan(radians(__UpperCAmelCase ) ) )
lowerCAmelCase__ : List[Any] = atan((1 - flattening) * tan(radians(__UpperCAmelCase ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
lowerCAmelCase__ : Any = haversine_distance(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
lowerCAmelCase__ : int = (b_lata + b_lata) / 2
lowerCAmelCase__ : Any = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
lowerCAmelCase__ : Optional[int] = (sin(__UpperCAmelCase ) ** 2) * (cos(__UpperCAmelCase ) ** 2)
lowerCAmelCase__ : Dict = cos(sigma / 2 ) ** 2
lowerCAmelCase__ : Union[str, Any] = (sigma - sin(__UpperCAmelCase )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
lowerCAmelCase__ : Tuple = (cos(__UpperCAmelCase ) ** 2) * (sin(__UpperCAmelCase ) ** 2)
lowerCAmelCase__ : int = sin(sigma / 2 ) ** 2
lowerCAmelCase__ : int = (sigma + sin(__UpperCAmelCase )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 507 | 0 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _a(unittest.TestCase):
    """Pipeline tests for text2text generation (encoder-decoder seq2seq models)."""

    # Mappings the shared pipeline-test harness uses to pick candidate models.
    # NOTE: the corrupted original assigned both to the same name, clobbering one.
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build the pipeline under test plus example inputs for the harness."""
        generator = TextaTextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        """Generic checks run against every candidate model."""
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        # Non-string input must be rejected.
        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        # Give the tokenizer a pad token so batching works.
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 449 |
"""simple docstring"""
import importlib
import inspect
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
# Raw string avoids invalid-escape-sequence warnings for `\[`, `\]`, `\.`.
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes that legitimately have no checkpoint in their docstring.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    """Verify each config class docstring mentions at least one valid checkpoint.

    A mention is valid when the markdown link text matches the huggingface.co
    URL, e.g. `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`.

    Raises:
        ValueError: listing every config class (not in the ignore set) whose
            docstring has no valid checkpoint link.
    """
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


# Keep the scrambled original name resolvable for any external caller.
__SCREAMING_SNAKE_CASE = check_config_docstrings_have_checkpoints

if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 449 | 1 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the normalized escape step of c = x + y*i under the Mandelbrot map.

    Iterates z -> z**2 + c for at most `max_step` steps and reports the step at
    which |z|**2 exceeds 4 (guaranteed divergence), normalized to [0, 1].
    A result of 1 means the point never diverged (it is in the Mandelbrot set).
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        # One complex squaring done on real/imaginary parts:
        # (a + bi)**2 + (x + yi) = (a*a - b*b + x) + (2ab + y)i
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    """Black for points inside the Mandelbrot set (distance == 1), white otherwise."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    """Black inside the set; otherwise an HSV hue proportional to the escape distance."""
    if distance == 1:
        return (0, 0, 0)
    else:
        # hue = distance, full saturation/value, converted to 0-255 RGB
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Render the Mandelbrot set into a PIL image.

    The complex-plane window is centered at (figure_center_x, figure_center_y)
    with the given width; its height follows the image aspect ratio. Pixels are
    colored either by escape distance (HSV) or plain black-and-white.
    """
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    # NOTE(review): the rendered image is bound to `a`, but `img.show()` below
    # reads `img` — looks like an automated rename clobbered the variable name;
    # confirm and unify before running.
    a: Image.Image = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
# Module-level logger used by the formatter's warning path.
logger = get_logger()

# Lazily-initialized mapping from device string identifiers to
# `jaxlib.xla_extension.Device` objects. Kept as a module-level global because
# Device objects are not serializable with pickle/dill.
DEVICE_MAPPING: Optional[dict] = None
class a_(TensorFormatter[Mapping, "jax.Array", Mapping]):
    """Formatter that converts Arrow rows/columns/batches into JAX arrays on a device."""

    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        """Map each available device's string identifier to its Device object."""
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        """Stack a list of same-shape/same-dtype JAX arrays into one array."""
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        """Convert one leaf value to a JAX array on the configured device."""
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        """Tensorize a (possibly nested) structure, unwrapping foreign tensors first."""
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        """Apply `_recursive_tensorize` over a nested mapping structure."""
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        """Extract, decode and tensorize the first row of an Arrow table."""
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        """Extract, decode and tensorize the first column of an Arrow table."""
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        """Extract, decode and tensorize a whole Arrow table as a batch mapping."""
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A vector in R^n backed by a list of floats.

    Supports +, -, scalar and dot multiplication, Euclidean length and angle.
    """

    def __init__(self, components: Collection[float] | None = None) -> None:
        # Copy the input so external mutation cannot affect the vector.
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            return Vector([self.__components[i] + other.component(i) for i in range(size)])
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            return Vector([self.__components[i] - other.component(i) for i in range(size)])
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            # scalar multiplication
            return Vector([c * other for c in self.__components])
        elif isinstance(other, Vector) and len(self) == len(other):
            # dot product
            size = len(self)
            return sum(self.__components[i] * other.component(i) for i in range(size))
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        """Return a copy of this vector."""
        return Vector(self.__components)

    def component(self, i: int) -> float:
        """Return the i-th component (negative indices allowed)."""
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        """Set the component at `pos` to `value`."""
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        """Return the Euclidean (L2) norm; raises on the empty vector."""
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        return math.sqrt(sum(c**2 for c in self.__components))

    def angle(self, other: Vector, deg: bool = False) -> float:
        """Return the angle to `other`, in radians (or degrees if `deg`)."""
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    """Return the zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Return the unit basis vector of `dimension` with a 1 at index `pos`."""
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Return scalar * x + y (the classic BLAS axpy operation)."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Return a vector of dimension n with random integer components in [a, b].

    NOTE(review): the corrupted original seeded with its first argument, so the
    result is deterministic per `n`; kept that way — confirm against callers.
    """
    random.seed(n)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """A simple w x h matrix over floats.

    Supports +, -, scalar/vector multiplication, minors, cofactors and the
    determinant (via Laplace expansion along the first row).
    """

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        # Scalar case is tested first; the two branches are mutually exclusive,
        # so the order does not change behavior.
        if isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        elif isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        """Return entry (x, y); raises on out-of-bounds indices."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        """Set entry (x, y) to `value`; raises on out-of-bounds indices."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        """Determinant of the submatrix with row x and column y removed."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        """Signed minor: (-1)**(x+y) * minor(x, y)."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        """Determinant via Laplace expansion along the first row."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    """Return the n x n zero matrix."""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(w: int, h: int, a: int, b: int) -> Matrix:
    """Return a w x h matrix with random integer entries in [a, b].

    NOTE(review): the corrupted original seeded with its first argument, so the
    result is deterministic per `w`; kept that way — confirm against callers.
    """
    random.seed(w)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(w)] for _ in range(h)
    ]
    return Matrix(matrix, w, h)
| 222 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: maps each submodule name to the public names it exports.
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes require torch, so only register them when torch is present.
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 222 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: maps each submodule name to the public names it exports.
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes require torch, so only register them when torch is present.
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """Sort `array` in place with pigeonhole sort and return it.

    Suitable only for integer sequences whose value range (max - min) is small,
    since one hole is allocated per possible value.
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the number of holes needed to cover the whole value range.
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting: record each value in its hole plus its repeat count.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array


# Keep the scrambled original name resolvable for any external caller.
_SCREAMING_SNAKE_CASE = pigeon_sort
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Read comma-separated integers from stdin and print them sorted.
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
from typing import List
import numpy as np
def __lowerCAmelCase ( A_ : dict ) -> int:
__UpperCAmelCase = {key: len(__snake_case ) for key, value in gen_kwargs.items() if isinstance(__snake_case , __snake_case )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
"Sharding is ambiguous for this dataset: "
+ "we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n"
+ "\n".join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
+ "\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, "
+ "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
) )
__UpperCAmelCase = max(lists_lengths.values() , default=0 )
return max(1 , __snake_case )
def __lowerCAmelCase ( A_ : int , A_ : int ) -> List[range]:
__UpperCAmelCase = []
for group_idx in range(__snake_case ):
__UpperCAmelCase = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
__UpperCAmelCase = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
__UpperCAmelCase = range(__snake_case , start + num_shards_to_add )
shards_indices_per_group.append(__snake_case )
return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split `gen_kwargs` into at most `max_num_jobs` dicts, sharding the list values.

    Non-list values are copied as-is into every resulting dict.
    """
    shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def __lowerCAmelCase ( A_ : List[dict] ) -> dict:
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , __snake_case )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def __lowerCAmelCase ( A_ : np.random.Generator , A_ : dict ) -> dict:
__UpperCAmelCase = {len(__snake_case ) for value in gen_kwargs.values() if isinstance(__snake_case , __snake_case )}
__UpperCAmelCase = {}
for size in list_sizes:
__UpperCAmelCase = list(range(__snake_case ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
__UpperCAmelCase = dict(__snake_case )
for key, value in shuffled_kwargs.items():
if isinstance(__snake_case , __snake_case ):
__UpperCAmelCase = [value[i] for i in indices_per_size[len(__snake_case )]]
return shuffled_kwargs
| 221 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow BERT checkpoint into a PyTorch state dict saved on disk.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        bert_config_file: JSON config describing the BERT architecture.
        pytorch_dump_path: where the converted ``state_dict`` is written.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
__snake_case :List[str] = HfApi()
__snake_case :str = {}
# fmt: off
__snake_case :Optional[Any] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
__snake_case :Union[str, Any] = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
__snake_case :str = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
__snake_case :List[Any] = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
__snake_case :Any = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
__snake_case :List[str] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
__snake_case :Optional[int] = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
__snake_case :Tuple = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
__snake_case :List[Any] = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
__snake_case :Optional[Any] = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
__snake_case :Optional[Any] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
__snake_case :List[str] = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
__snake_case :Any = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
__snake_case :List[str] = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
__snake_case :Union[str, Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
# NOTE(review): this script appears machine-renamed — every assignment target
# below is the scratch name `__snake_case`, while later lines read the intended
# names (`models`, `local_checkpoint`, `model`, `noise`, `time_step`, `logits`,
# `results`). As written nearly every read raises NameError; the original names
# must be restored (and `results` keyed per model — see the tensors above) before
# this can run. Inline comments mark the apparent intent of each assignment.
__snake_case :List[Any] = api.list_models(filter='''diffusers''')  # presumably `models`
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        # presumably `local_checkpoint`: local path of the downloaded checkpoint
        __snake_case :List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]

        print(f'Started running {mod.modelId}!!!')

        if mod.modelId.startswith('''CompVis'''):
            __snake_case :Optional[int] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')  # presumably `model`
        else:
            __snake_case :str = UNetaDModel.from_pretrained(local_checkpoint)  # presumably `model`

        # fixed seeds so the sampled noise (and thus the logits) are reproducible
        torch.manual_seed(0)
        random.seed(0)

        __snake_case :List[Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)  # presumably `noise`
        __snake_case :List[Any] = torch.tensor([10] * noise.shape[0])  # presumably `time_step`
        with torch.no_grad():
            __snake_case :Any = model(noise, time_step).sample  # presumably `logits`

        # compare the first 30 logits against the stored reference tensor for this model
        assert torch.allclose(
            logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
        )
        print(f'{mod.modelId} has passed successfully!!!')
| 60 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __snake_case ( _UpperCAmelCase ):
    """Preprocess a PIL image into a model-ready tensor.

    Resizes the image down to the nearest multiple of 32, converts it to a
    float NCHW torch tensor and rescales pixel values from [0, 255] to [-1, 1].

    Args:
        _UpperCAmelCase: a ``PIL.Image.Image``.

    Returns:
        ``torch.Tensor`` of shape (1, 3, H, W) with values in [-1, 1].
    """
    image = _UpperCAmelCase
    w, h = image.size
    # Resize to an integer multiple of 32 (required by the UNet downsampling).
    w, h = (x - x % 32 for x in (w, h))
    image = image.resize((w, h), resample=PIL_INTERPOLATION['''lanczos'''])
    image = np.array(image).astype(np.float32) / 255.0
    # HWC -> NCHW with a leading batch dimension.
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    # Map [0, 1] to [-1, 1].
    return 2.0 * image - 1.0
class _A ( DiffusionPipeline ):
    """Latent Diffusion super-resolution pipeline.

    Components:
        vqvae: VQ-VAE model used to decode latents into images.
        unet: conditional UNet denoiser (latents concatenated with the
            low-resolution image along the channel axis).
        scheduler: any of the supported noise schedulers.
    """

    def __init__( self , vqvae : VQModel , unet : UNetaDModel , scheduler : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler)

    @torch.no_grad()
    def __call__( self , image : Union[torch.Tensor, PIL.Image.Image] = None , batch_size : Optional[int] = 1 , num_inference_steps : Optional[int] = 100 , eta : Optional[float] = 0.0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
        """Run super-resolution on ``image``.

        Returns an ``ImagePipelineOutput`` (or a plain tuple when
        ``return_dict=False``) containing the upscaled image(s).
        """
        if isinstance(image , PIL.Image.Image):
            batch_size = 1
        elif isinstance(image , torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}')
        if isinstance(image , PIL.Image.Image):
            # NOTE: `__snake_case` is this module's preprocessing helper.
            image = __snake_case(image)
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype)
        image = image.to(device=self.device , dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['''eta'''] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input , t)
            # predict the noise residual
            noise_pred = self.unet(latents_input , t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image , -1.0 , 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 60 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
# Repo id of a tiny model used by the cached_file tests below.
# NOTE(review): all three constants bind the same name `_lowerCAmelCase`, so the
# later assignments overwrite the earlier ones — presumably these were three
# distinct constants (repo id, cache dir, commit sha) before renaming; verify.
_lowerCAmelCase = """hf-internal-testing/tiny-random-bert"""
# Expected cache directory for that repo inside TRANSFORMERS_CACHE.
_lowerCAmelCase = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
# Full commit hash used to test revision resolution.
_lowerCAmelCase = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""
class __UpperCamelCase ( unittest.TestCase ):
    # NOTE(review): every method below is named `__lowerCamelCase`, so each def
    # shadows the previous one, and `_A` is used both as a value and as a
    # keyword argument value without being defined at module level — this looks
    # like lossy renaming of the original hub-utils tests; confirm upstream.

    def __lowerCamelCase ( self ):
        '''Download via cached_file, check cache layout, re-hit and revision pinning.'''
        _lowerCAmelCase : Tuple = cached_file(_A ,_A )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(_A ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(_A ,_A ) ) )
        with open(os.path.join(_A ,'refs' ,'main' ) ) as f:
            _lowerCAmelCase : Optional[int] = f.read()
        # The resolved path must live under snapshots/<commit-hash>/.
        self.assertEqual(_A ,os.path.join(_A ,'snapshots' ,_A ,_A ) )
        self.assertTrue(os.path.isfile(_A ) )
        # File is cached at the same place the second time.
        _lowerCAmelCase : Optional[Any] = cached_file(_A ,_A )
        self.assertEqual(_A ,_A )
        # Using a specific revision to test the full commit hash.
        _lowerCAmelCase : Tuple = cached_file(_A ,_A ,revision='9b8c223' )
        self.assertEqual(_A ,os.path.join(_A ,'snapshots' ,_A ,_A ) )

    def __lowerCamelCase ( self ):
        '''cached_file must raise helpful errors for bad repo / revision / filename.'''
        with self.assertRaisesRegex(_A ,'is not a valid model identifier' ):
            _lowerCAmelCase : Dict = cached_file('tiny-random-bert' ,_A )
        with self.assertRaisesRegex(_A ,'is not a valid git identifier' ):
            _lowerCAmelCase : Union[str, Any] = cached_file(_A ,_A ,revision='aaaa' )
        with self.assertRaisesRegex(_A ,'does not appear to have a file named' ):
            _lowerCAmelCase : Optional[Any] = cached_file(_A ,'conf' )

    def __lowerCamelCase ( self ):
        '''Missing files are negatively cached (.no_exist) and connection errors degrade to None.'''
        with self.assertRaisesRegex(_A ,'does not appear to have a file named' ):
            _lowerCAmelCase : Tuple = cached_file(_A ,'conf' )
        with open(os.path.join(_A ,'refs' ,'main' ) ) as f:
            _lowerCAmelCase : int = f.read()
        # A marker for the missing file must exist under .no_exist/<commit>/.
        self.assertTrue(os.path.isfile(os.path.join(_A ,'.no_exist' ,_A ,'conf' ) ) )
        _lowerCAmelCase : Optional[int] = cached_file(_A ,'conf' ,_raise_exceptions_for_missing_entries=_A )
        self.assertIsNone(_A )
        _lowerCAmelCase : Dict = cached_file(_A ,'conf' ,local_files_only=_A ,_raise_exceptions_for_missing_entries=_A )
        self.assertIsNone(_A )
        # Build a fake HTTP 500 response for the mocked head request.
        _lowerCAmelCase : Optional[Any] = mock.Mock()
        _lowerCAmelCase : Any = 500
        _lowerCAmelCase : Tuple = {}
        _lowerCAmelCase : List[str] = HTTPError
        _lowerCAmelCase : Tuple = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request' ,return_value=_A ) as mock_head:
            _lowerCAmelCase : Tuple = cached_file(_A ,'conf' ,_raise_exceptions_for_connection_errors=_A )
            self.assertIsNone(_A )
        # This check we did call the fake head request
        mock_head.assert_called()

    def __lowerCamelCase ( self ):
        '''has_file detects which weight formats a repo actually contains.'''
        self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' ,_A ) )
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' ,_A ) )
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' ,_A ) )

    def __lowerCamelCase ( self ):
        '''get_file_from_repo: None for missing files, errors for bad repo/revision, content for real files.'''
        self.assertIsNone(get_file_from_repo('bert-base-cased' ,'ahah.txt' ) )
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(_A ,'is not a valid model identifier' ):
            get_file_from_repo('bert-base-case' ,_A )
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(_A ,'is not a valid git identifier' ):
            get_file_from_repo('bert-base-cased' ,_A ,revision='ahaha' )
        _lowerCAmelCase : int = get_file_from_repo('bert-base-cased' ,_A )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        _lowerCAmelCase : Optional[int] = json.loads(open(_A ,'r' ).read() )
        self.assertEqual(config['hidden_size'] ,768 )

    def __lowerCamelCase ( self ):
        '''get_file_from_repo works on a local directory and returns None for absent files.'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            _lowerCAmelCase : List[str] = Path(_A ) / 'a.txt'
            filename.touch()
            self.assertEqual(get_file_from_repo(_A ,'a.txt' ) ,str(_A ) )
            self.assertIsNone(get_file_from_repo(_A ,'b.txt' ) )
| 259 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
_lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class __UpperCamelCase ( DiffusionPipeline ):
    """Unconditional audio generation pipeline (UNet + scheduler).

    Generates raw audio waveforms by iterative denoising, in the style of
    Dance Diffusion.
    """

    def __init__( self ,unet ,scheduler ):
        super().__init__()
        self.register_modules(unet=unet ,scheduler=scheduler )

    @torch.no_grad()
    def __call__( self ,batch_size = 1 ,num_inference_steps = 100 ,generator = None ,audio_length_in_s = None ,return_dict = True ,):
        """Generate ``batch_size`` audio clips of ``audio_length_in_s`` seconds.

        Returns an ``AudioPipelineOutput`` (or a tuple when ``return_dict=False``)
        with a numpy array of shape (batch, channels, samples).
        """
        if audio_length_in_s is None:
            # Default to the length the UNet was trained on.
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        # The UNet halves the resolution at every up/down block.
        down_scale_factor = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                F"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
                F""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
        original_sample_size = int(sample_size )
        if sample_size % down_scale_factor != 0:
            # Round up to a length the UNet can process; trim back afterwards.
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                F"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
                F""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
                ' process.' )
        sample_size = int(sample_size )
        dtype = next(iter(self.unet.parameters() ) ).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator ,list ) and len(generator ) != batch_size:
            raise ValueError(
                F"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        audio = randn_tensor(shape ,generator=generator ,device=self.device ,dtype=dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps ,device=audio.device )
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(audio ,t ).sample
            # 2. compute previous image: x_t -> t_t-1
            audio = self.scheduler.step(model_output ,t ,audio ).prev_sample
        audio = audio.clamp(-1 ,1 ).float().cpu().numpy()
        # Trim the padding added to satisfy the down-scale factor.
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio )
| 259 | 1 |
"""simple docstring"""
from __future__ import annotations
def a_ ( graph : dict , start : str ) -> set:
    """Iterative depth-first search.

    Args:
        graph: adjacency mapping node -> list of neighbour nodes.
        start: node to start the traversal from.

    Returns:
        The set of nodes reachable from ``start`` (including ``start``).
    """
    explored, stack = set(), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
# Sample graph used for the demo run below.
_UpperCamelCase : Tuple = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The DFS function and graph are named `a_` / `_UpperCamelCase` in this module;
    # the original references (`depth_first_search`, `G`) were undefined.
    print(a_(_UpperCamelCase, "A"))
| 645 | """simple docstring"""
import math
def a_ ( n : int = 100 ) -> int:
    """Project Euler 6: difference between the square of the sum and the
    sum of the squares of the first ``n`` natural numbers.

    Args:
        n: upper bound (inclusive).

    Returns:
        (1 + 2 + ... + n)**2 - (1**2 + 2**2 + ... + n**2)
    """
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
    # The solution function is named `a_` in this module (the original called an
    # undefined `solution`).
    print(f'''{a_() = }''')
| 645 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
snake_case = '''
Human: <<task>>
Assistant: '''
snake_case = '''huggingface-tools/default-prompts'''
snake_case = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def snake_case ( prompt_or_repo_id , agent_name , mode="run" ) -> str:
    """Return the prompt text to use for an agent.

    If ``prompt_or_repo_id`` contains whitespace it is treated as the prompt
    itself and returned verbatim; otherwise it is treated as a Hub dataset repo
    id from which the template for ``mode`` is downloaded.

    Args:
        prompt_or_repo_id: a literal prompt or a Hub repo id (defaults to the
            default prompts repo when None).
        agent_name: reported in the user-agent of the download request.
        mode: which template to fetch, ``"run"`` or ``"chat"``.
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search('''\\s''' , prompt_or_repo_id ) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id , PROMPT_FILES[mode] , repo_type='''dataset''' , user_agent={'''agent''': agent_name} )
    with open(prompt_file , '''r''' , encoding='''utf-8''' ) as f:
        return f.read()
| 103 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCAmelCase ( TestCase):
    """Code-quality checks over every dataset script under ./datasets:
    all `open(...)` calls must pass an encoding, and no bare `print(...)`
    statements are allowed."""

    def _no_encoding_on_file_open( self , filepath : str ):
        """Return a match for an `open(...)` call that lacks an encoding/mode keyword, else None."""
        with open(filepath , encoding='''utf-8''' ) as input_file:
            regexp = re.compile(r'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' )
            input_text = input_file.read()
            match = regexp.search(input_text )
        return match

    def _no_print_statements( self , filepath : str ):
        """Return a match for a real `print(...)` call (comments/strings ignored), else None."""
        with open(filepath , encoding='''utf-8''' ) as input_file:
            regexp = re.compile(r'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL )
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text )
            matches = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open( self ):
        dataset_paths = Path('''./datasets''' )
        dataset_files = list(dataset_paths.absolute().glob('''**/*.py''' ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset ) ):
                raise AssertionError(F'''open(...) must use utf-8 encoding in {dataset}''' )

    def test_no_print_statements( self ):
        dataset_paths = Path('''./datasets''' )
        dataset_files = list(dataset_paths.absolute().glob('''**/*.py''' ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset ) ):
                raise AssertionError(F'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
| 404 | 0 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
# Shared fixtures: a small Lena test image and its grayscale version.
# NOTE(review): the second line reads `img`, but the first assignment binds
# `SCREAMING_SNAKE_CASE__` — the names look lossily renamed; likewise every
# function below is named `UpperCAmelCase__` (each def shadows the previous
# one) and bodies read names (`a_`, `negative_img`, `resp`, ...) that are not
# defined here. Confirm against the original test module before relying on it.
SCREAMING_SNAKE_CASE__ = imread(r"digital_image_processing/image_data/lena_small.jpg")
SCREAMING_SNAKE_CASE__ = cvtColor(img, COLOR_BGR2GRAY)
def UpperCAmelCase__ ( ):
    '''Negative of the color image should set at least one pixel.'''
    lowerCAmelCase = cn.convert_to_negative(a_ )
    # assert negative_img array for at least one True
    assert negative_img.any()
def UpperCAmelCase__ ( ):
    '''change_contrast should return an RGB PIL image of the same size.'''
    with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(a_ , 1_10 ) ).startswith(
            """<PIL.Image.Image image mode=RGB size=100x100 at""" )
def UpperCAmelCase__ ( ):
    '''A generated Gaussian kernel should be entirely nonzero.'''
    lowerCAmelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()
def UpperCAmelCase__ ( ):
    '''Canny edge detection on the grayscale image should mark at least one edge.'''
    lowerCAmelCase = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    lowerCAmelCase = canny.canny(a_ )
    # assert canny array for at least one True
    assert canny_array.any()
def UpperCAmelCase__ ( ):
    '''Gaussian filtering should produce an entirely nonzero result.'''
    assert gg.gaussian_filter(a_ , 5 , sigma=0.9 ).all()
def UpperCAmelCase__ ( ):
    '''Convolution with a Laplacian-like kernel should produce a nonzero result.'''
    lowerCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    lowerCAmelCase = conv.img_convolve(a_ , a_ ).astype(a_ )
    assert res.any()
def UpperCAmelCase__ ( ):
    '''Median filtering should produce a nonzero result.'''
    assert med.median_filter(a_ , 3 ).any()
# NOTE(review): as above, these defs all share the name `UpperCAmelCase__` and
# read locals that are never bound under these names — lossy renaming suspected.
def UpperCAmelCase__ ( ):
    '''Sobel filtering should yield nonzero gradient magnitude and direction.'''
    lowerCAmelCase = sob.sobel_filter(a_ )
    assert grad.any() and theta.any()
def UpperCAmelCase__ ( ):
    '''Sepia conversion should set every pixel.'''
    lowerCAmelCase = sp.make_sepia(a_ , 20 )
    assert sepia.all()
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str = "digital_image_processing/image_data/lena_small.jpg" ):
    '''Burkes dithering should produce a nonzero output image.'''
    lowerCAmelCase = bs.Burkes(imread(a_ , 1 ) , 1_20 )
    burkes.process()
    assert burkes.output_img.any()
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str = "digital_image_processing/image_data/lena_small.jpg" , ):
    '''Nearest-neighbour resize to 400x200 should produce a nonzero output.'''
    lowerCAmelCase = rs.NearestNeighbour(imread(a_ , 1 ) , 4_00 , 2_00 )
    nn.process()
    assert nn.output.any()
def UpperCAmelCase__ ( ):
    '''Local binary pattern: neighbour extraction and full LBP image computation.'''
    lowerCAmelCase = '''digital_image_processing/image_data/lena.jpg'''
    # Reading the image and converting it to grayscale.
    lowerCAmelCase = imread(a_ , 0 )
    # Test for get_neighbors_pixel function() return not None
    lowerCAmelCase = 0
    lowerCAmelCase = 0
    lowerCAmelCase = image[x_coordinate][y_coordinate]
    lowerCAmelCase = lbp.get_neighbors_pixel(
        a_ , a_ , a_ , a_ )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lowerCAmelCase = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            lowerCAmelCase = lbp.local_binary_value(a_ , a_ , a_ )
    assert lbp_image.any()
| 705 |
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential - building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    # `Model.fit` accepts generators directly in TF2; `fit_generator` is deprecated.
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| 393 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCAmelCase_ = None
# NOTE(review): all constants below bind the same name `UpperCAmelCase_` and so
# overwrite one another — presumably logger / vocab-file names / pretrained
# vocab map / max sizes were distinct names before renaming; verify upstream.
UpperCAmelCase_ = logging.get_logger(__name__)
# Filenames used for the slow (sentencepiece) and fast (tokenizers) vocabularies.
UpperCAmelCase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
# Hub URLs of the pretrained vocabulary files.
UpperCAmelCase_ = {
    """vocab_file""": {
        """facebook/nllb-200-distilled-600M""": (
            """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
        ),
    },
    """tokenizer_file""": {
        """facebook/nllb-200-distilled-600M""": (
            """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
        ),
    },
}
# Maximum model input lengths (positional embedding sizes).
UpperCAmelCase_ = {
    """facebook/nllb-large-en-ro""": 1_0_2_4,
    """facebook/nllb-200-distilled-600M""": 1_0_2_4,
}
# fmt: off
UpperCAmelCase_ = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", 
"""mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class lowerCamelCase__ ( PreTrainedTokenizerFast):
    """Fast NLLB tokenizer backed by the HuggingFace *tokenizers* library.

    Handles NLLB's language-code special tokens: depending on
    ``legacy_behaviour`` the source/target language code is placed either
    after the EOS suffix (legacy) or as a prefix token.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , legacy_behaviour=False , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , legacy_behaviour=legacy_behaviour , **kwargs , )
        self.vocab_file = vocab_file
        # Saving the slow vocabulary is only possible when we have the sentencepiece file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else '''eng_Latn'''
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )

    @property
    def src_lang( self ) -> str:
        """Current source language code (e.g. ``eng_Latn``)."""
        return self._src_lang

    @src_lang.setter
    def src_lang( self , new_src_lang : str ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def build_inputs_with_special_tokens( self , token_ids_a : List[int] , token_ids_a_pair : Optional[List[int]] = None ) -> List[int]:
        """Wrap token ids with the current prefix/suffix language special tokens."""
        if token_ids_a_pair is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a_pair + self.suffix_tokens

    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_a_pair : Optional[List[int]] = None ) -> List[int]:
        """NLLB does not use token type ids; return a zero mask of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep ) * [0]

    def _build_translation_inputs( self , raw_inputs , return_tensors : str , src_lang : Optional[str] , tgt_lang : Optional[str] , **extra_kwargs ):
        """Tokenize ``raw_inputs`` for translation and set the forced BOS to the target language."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['''forced_bos_token_id'''] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch( self , src_texts : List[str] , src_lang : str = "eng_Latn" , tgt_texts : Optional[List[str]] = None , tgt_lang : str = "fra_Latn" , **kwargs , ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )

    def _switch_to_input_mode( self ):
        return self.set_src_lang_special_tokens(self.src_lang )

    def _switch_to_target_mode( self ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    def set_src_lang_special_tokens( self , src_lang ) -> None:
        """Reset prefix/suffix tokens for encoding source text in ``src_lang``."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )

    def set_tgt_lang_special_tokens( self , lang : str ) -> None:
        """Reset prefix/suffix tokens for encoding target text in ``lang``."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )

    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """Copy the sentencepiece vocabulary file into ``save_directory``."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 2 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_a = logging.get_logger(__name__)
class __A ( lowerCAmelCase ):
    """Image processor that resizes images down to a multiple of `size_divisor`
    and rescales pixel values to [0, 1].

    NOTE(review): the scrambled original bound all three methods to the same
    name (so only the last survived) while `preprocess` calls `self.resize`
    and `self.rescale`; method names restored from those call sites.
    """

    # Name of the model input produced by `preprocess` (restored from the
    # "pixel_values" key used below — confirm against the base class).
    model_input_names = ['pixel_values']

    def __init__( self , do_resize=True , size_divisor=3_2 , resample=PILImageResampling.BILINEAR , do_rescale=True , **kwargs , ):
        """Store default preprocessing flags; see `preprocess` for semantics."""
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs )

    def resize( self , image , size_divisor , resample , data_format=None , **kwargs ):
        """Resize `image`, rounding (height, width) DOWN to multiples of `size_divisor`."""
        height, width = get_image_size(image )
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        # `resize` here resolves to the module-level image_transforms function.
        image = resize(image , (new_h, new_w) , resample=resample , data_format=data_format , **kwargs )
        return image

    def rescale( self , image , scale , data_format=None , **kwargs ):
        """Multiply pixel values by `scale` (module-level `rescale` helper)."""
        return rescale(image=image , scale=scale , data_format=data_format , **kwargs )

    def preprocess( self , images , do_resize=None , do_rescale=None , size_divisor=None , resample=None , return_tensors=None , data_format=ChannelDimension.FIRST , **kwargs , ):
        """Preprocess one image or a list of images into a `BatchFeature`.

        Per-call arguments override the instance defaults set in `__init__`.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('size_divisor is required for resizing' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError('Invalid image(s)' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img ) for img in images]
        if do_resize:
            images = [self.resize(image , size_divisor=size_divisor , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image , scale=1 / 2_5_5 ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 481 | 0 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class snake_case_ ( lowerCAmelCase ):
    """Dataset reader backed by a Spark DataFrame.

    Parameter names restored from the attributes read in `__A` below (the
    scrambled original declared every parameter with the same name, a
    SyntaxError).
    """

    def __init__(
        self ,
        df ,
        split=None ,
        features=None ,
        streaming=True ,
        cache_dir=None ,
        keep_in_memory=False ,
        working_dir=None ,
        load_from_cache_file=True ,
        file_format='arrow' ,
        **kwargs ,
    ):
        super().__init__(
            split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , **kwargs , )
        # Read back in `__A` to decide whether to force a re-download.
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df , features=features , cache_dir=cache_dir , working_dir=working_dir , **kwargs , )

    def __A ( self ):
        """Build and return the dataset (streaming or fully prepared)."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split )
| 311 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
# The functions below log through `logger`; the scrambled original bound the
# logger to a throwaway name (and its `List[Any]` annotation would raise
# NameError since `typing` is not imported). Bind both names for compatibility.
logger = logging.get_logger(__name__)
lowerCAmelCase__ = logger
class ParallelBackendConfig:
    """Holds the name of the joblib backend selected via the `parallel_backend`
    context manager; `None` means "use the multiprocessing pool"."""

    # Read/written as `ParallelBackendConfig.backend_name` by the functions
    # below; the scrambled original lost both the class and attribute names.
    backend_name = None


# Backward-compatible alias for the scrambled original class name.
snake_case_ = ParallelBackendConfig
@experimental
def __SCREAMING_SNAKE_CASE ( function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func ):
    """Dispatch a parallel map either to the multiprocessing pool (default) or
    to the configured joblib backend.

    Parameter names restored from their uses in the pool implementation below
    (the scrambled original declared seven identically-named parameters, a
    SyntaxError). The unimported `Dict` return annotation was dropped.
    """
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func )

    return _map_with_joblib(function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func )
def __SCREAMING_SNAKE_CASE ( function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func ):
    """Split `iterable` into contiguous chunks and map `single_map_nested_func`
    over them with a multiprocessing pool.

    Parameter names restored from the tuple appended to `split_kwds` in the
    original source; the unimported `Dict` return annotation was dropped.
    """
    # Never spawn more workers than there are items.
    num_proc = num_proc if num_proc <= len(iterable ) else len(iterable )
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc ):
        div = len(iterable ) // num_proc
        mod = len(iterable ) % num_proc
        # The first `mod` chunks get one extra element each.
        start = div * index + min(index , mod )
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )

    if len(iterable ) != sum(len(i[1] ) for i in split_kwds ):
        raise ValueError(
            f'Error dividing inputs iterable among processes. '
            f'Total number of objects {len(iterable )}, '
            f'length: {sum(len(i[1] ) for i in split_kwds )}' )
    logger.info(
        f'Spawning {num_proc} processes for {len(iterable )} objects in slices of {[len(i[1] ) for i in split_kwds]}' )
    initargs, initializer = None, None
    if not disable_tqdm:
        # Share tqdm's lock with the workers so progress bars do not clobber
        # each other.
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc , initargs=initargs , initializer=initializer ) as pool:
        mapped = pool.map(single_map_nested_func , split_kwds )
    logger.info(f'Finished {num_proc} processes' )
    # Flatten the per-process result lists.
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f'Unpacked {len(mapped )} objects' )
    return mapped
def __SCREAMING_SNAKE_CASE ( function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func ):
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=num_proc ):
        # tqdm is disabled and index is None for joblib-driven workers.
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def __SCREAMING_SNAKE_CASE ( backend_name ):
    """Context manager that routes `parallel_map` through the named joblib
    backend for the duration of the `with` block.

    The scrambled original assigned the backend name to a local variable, so
    the setting never reached `ParallelBackendConfig` (and the `finally` reset
    a local too); the unimported `Tuple` return annotation was dropped.
    """
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        # Always restore the default (multiprocessing) backend on exit.
        ParallelBackendConfig.backend_name = None
| 311 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the FNet package: keys are submodule names
# (matching the `from .<module> import ...` lines under TYPE_CHECKING),
# values are the public names each submodule exports.
# The scrambled original bound every structure to the same throwaway name and
# then passed `_import_structure` — which was never defined — to `_LazyModule`,
# and dropped the `sys.modules[__name__]` assignment that `import sys` exists for.
_import_structure = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_fnet'] = ['FNetTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_fnet_fast'] = ['FNetTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_fnet'] = [
        'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FNetForMaskedLM',
        'FNetForMultipleChoice',
        'FNetForNextSentencePrediction',
        'FNetForPreTraining',
        'FNetForQuestionAnswering',
        'FNetForSequenceClassification',
        'FNetForTokenClassification',
        'FNetLayer',
        'FNetModel',
        'FNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so the heavy submodules are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 224 |
"""simple docstring"""
from timeit import timeit
# Map of example strings to whether each is a palindrome; the `benchmark_function`
# helper and the `__main__` block below both read this as `test_data`, but the
# scrambled original bound the dict to a throwaway name.
test_data = {
    'MALAYALAM': True,
    'String': False,
    'rotor': True,
    'level': True,
    'A': True,
    'BB': True,
    'ABC': False,
    'amanaplanacanalpanama': True,  # "a man a plan a canal panama"
}
# Backward-compatible alias for the scrambled original name.
UpperCAmelCase__ = test_data
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def _UpperCAmelCase ( __lowerCamelCase : str ) -> bool:
_snake_case = 0
_snake_case = len(__lowerCamelCase ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def _UpperCAmelCase ( __lowerCamelCase : str ) -> bool:
_snake_case = len(__lowerCamelCase ) // 2
_snake_case = len(__lowerCamelCase )
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
return all(s[i] == s[n - i - 1] for i in range(__lowerCamelCase ) )
def _UpperCAmelCase ( __lowerCamelCase : str ) -> bool:
if len(__lowerCamelCase ) <= 2:
return True
if s[0] == s[len(__lowerCamelCase ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def _UpperCAmelCase ( __lowerCamelCase : str ) -> bool:
return s == s[::-1]
def _UpperCAmelCase ( name : str ) -> None:
    """Benchmark the named palindrome implementation over `test_data`.

    Local/parameter names (`name`, `number`, `result`) restored from the
    f-strings in the body, which referenced them while the scrambled original
    bound everything to placeholders (NameError at call time).

    Args:
        name: module-level function name to benchmark; resolved via
            `from __main__ import ...` inside the timeit setup.
    """
    stmt = f'''all({name}(key) is value for key, value in test_data.items())'''
    setup = f'''from __main__ import test_data, {name}'''
    number = 50_00_00
    result = timeit(stmt=stmt , setup=setup , number=number )
    print(f'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' )
# Self-check plus benchmark driver for the palindrome implementations.
# NOTE(review): `is_palindrome`, `is_palindrome_recursive`, `is_palindrome_slice`
# and `benchmark_function` are not defined under these names in this file —
# every definition above is named `_UpperCAmelCase` — so this block raises
# NameError as written; confirm the intended function names.
if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(F"{key:21} {value}")
    print('a man a plan a canal panama')
    # finished 500,000 runs in 0.46793 seconds
    benchmark_function('is_palindrome_slice')
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function('is_palindrome')
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function('is_palindrome_recursive')
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function('is_palindrome_traversal')
| 224 | 1 |
"""simple docstring"""
import math
def A ( x , a ):
    """Return f(x) = x**2 - a, whose positive root is sqrt(a).

    Parameter names restored: the scrambled original declared two parameters
    with the same name (SyntaxError) while the body referenced `a`.
    """
    return math.pow(x , 2 ) - a
def A ( x ):
    """Return f'(x) = 2*x, the derivative of x**2 - a.

    Parameter renamed to `x`, which the body referenced but the scrambled
    original never defined.
    """
    return 2 * x
def A ( a ):
    """Return a starting point >= sqrt(a) for Newton iteration by repeatedly
    squaring from 2.0 until the value exceeds `a`.

    Fixes the loop body: the scrambled original squared `a` instead of the
    running value, so the loop variable never advanced correctly.
    """
    start = 2.0
    while start <= a:
        start = math.pow(start , 2 )
    return start
def A ( a , max_iter = 99_99 , tolerance = 0.00_00_00_00_00_00_01 ):
    """Approximate sqrt(a) with Newton's method.

    The scrambled original collapsed `prev_value` and `value` into one local,
    so convergence could never be detected; parameter names restored (two
    parameters shared one name, a SyntaxError).

    Raises:
        ValueError: if `a` is negative.
    """
    if a < 0:
        raise ValueError("""math domain error""" )
    value = get_initial_point(a )
    for _ in range(max_iter ):
        prev_value = value
        # Newton step: x_{k+1} = x_k - f(x_k)/f'(x_k).
        value = value - fx(value , a ) / fx_derivative(value )
        if abs(prev_value - value ) < tolerance:
            return value
    return value
# Run any doctests embedded in this module when executed as a script.
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 704 |
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def A ( ):
    """In CONNECTION_TIMES_OUT simulation, requests without a timeout must
    raise `RequestWouldHangIndefinitelyError` (imported above and otherwise
    unused — the scrambled original passed an undefined placeholder to
    `pytest.raises`), while requests with a timeout surface ConnectTimeout."""
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
            requests.request("""GET""" , """https://huggingface.co""" )
        with pytest.raises(requests.exceptions.ConnectTimeout ):
            requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def A ( ):
    """In CONNECTION_FAILS simulation every request must raise ConnectionError."""
    url = """https://huggingface.co"""
    with offline(OfflineSimulationMode.CONNECTION_FAILS ):
        with pytest.raises(requests.exceptions.ConnectionError ):
            requests.request("""GET""" , url )
def A ( ):
    """With HF_DATASETS_OFFLINE=1, `http_head` must refuse to hit the network.

    NOTE(review): the scrambled original passed an undefined placeholder to
    `pytest.raises`; restored to `ConnectionError` (datasets' offline-mode
    error subclasses it) — confirm against the datasets test suite.
    """
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
            http_head("""https://huggingface.co""" )
| 616 | 0 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
lowercase__ ='\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n'
lowercase__ ='\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
lowercase__ ='\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    """TER (Translation Edit Rate) metric backed by sacrebleu.

    NOTE(review): the scrambled original bound both methods to the same name
    (so the second shadowed the first) and declared every `_compute` parameter
    with one name (SyntaxError); method and parameter names restored from the
    `datasets.Metric` API and the module's kwargs description — confirm.
    """

    def _info(self ):
        # TER only exists in sacrebleu >= 1.4.12.
        if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
            raise ImportWarning(
                """To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
                """You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            homepage="""http://www.cs.umd.edu/~snover/tercom/""" ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" , id="""sequence""" ),
                    """references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
                } ) ,
            codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] ,
            reference_urls=[
                """https://github.com/jhclark/tercom""",
            ] , )

    def _compute(self , predictions , references , normalized = False , ignore_punct = False , support_zh_ja_chars = False , case_sensitive = False , ):
        """Compute TER; every prediction must have the same number of references."""
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
        # Transpose: sacrebleu expects one stream per reference position.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_ter = TER(
            normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
        output = sb_ter.corpus_score(predictions , transformed_references )
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 263 |
import qiskit
def __magic_name__ ( qubits , classical_bits ) -> qiskit.result.counts.Counts:
    """Measure qubit 0 of a fresh circuit 1000 times on the Aer simulator.

    The scrambled original declared both parameters with the same name
    (SyntaxError) and executed the circuit "on" the circuit itself instead of
    the simulator backend.
    """
    simulator = qiskit.Aer.get_backend("""aer_simulator""" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''') | 458 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
    """Configuration for the Table Transformer (DETR-style detection) model.

    NOTE(review): the scrambled original bound all class attributes, every
    `__init__` parameter (SyntaxError: duplicate argument names) and both
    properties to single reused names; names restored from the attribute
    assignments in the body and `PretrainedConfig` conventions — confirm.
    """

    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self ,
        use_timm_backbone=True ,
        backbone_config=None ,
        num_channels=3 ,
        num_queries=100 ,
        encoder_layers=6 ,
        encoder_ffn_dim=2_048 ,
        encoder_attention_heads=8 ,
        decoder_layers=6 ,
        decoder_ffn_dim=2_048 ,
        decoder_attention_heads=8 ,
        encoder_layerdrop=0.0 ,
        decoder_layerdrop=0.0 ,
        is_encoder_decoder=True ,
        activation_function="relu" ,
        d_model=256 ,
        dropout=0.1 ,
        attention_dropout=0.0 ,
        activation_dropout=0.0 ,
        init_std=0.02 ,
        init_xavier_std=1.0 ,
        auxiliary_loss=False ,
        position_embedding_type="sine" ,
        backbone="resnet50" ,
        use_pretrained_backbone=True ,
        dilation=False ,
        class_cost=1 ,
        bbox_cost=5 ,
        giou_cost=2 ,
        mask_loss_coefficient=1 ,
        dice_loss_coefficient=1 ,
        bbox_loss_coefficient=5 ,
        giou_loss_coefficient=2 ,
        eos_coefficient=0.1 ,
        **kwargs ,
    ) -> List[str]:
        # A timm backbone and an explicit HF backbone config are exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
            elif isinstance(backbone_config , dict ):
                # Rebuild a config object from its serialized dict form.
                backbone_model_type = backbone_config.get("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        # NOTE(review): restored per DETR-style configs (the original assigned
        # `encoder_layers` a second time) — confirm attribute name.
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    @property
    def num_attention_heads(self ) -> int:
        # Mirrors `attribute_map`: attention heads come from the encoder.
        return self.encoder_attention_heads

    @property
    def hidden_size(self ) -> int:
        # Mirrors `attribute_map`: hidden size is the transformer width.
        return self.d_model
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
    """ONNX export configuration for the Table Transformer model.

    NOTE(review): the scrambled original bound the class attribute and all
    three properties to single reused names (so the later properties shadowed
    the earlier ones); names restored from the `OnnxConfig` API — confirm.
    """

    torch_onnx_minimum_version = version.parse("""1.11""" )

    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the two model inputs.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ] )

    @property
    def atol_for_validation(self ) -> float:
        # Absolute tolerance when validating the exported model's outputs.
        return 1e-5

    @property
    def default_onnx_opset(self ) -> int:
        return 12
| 707 |
'''simple docstring'''
from math import factorial, pi
def __UpperCamelCase ( __lowerCamelCase : float , __lowerCamelCase : int = 30 ) -> float:
'''simple docstring'''
if not isinstance(__lowerCamelCase , (int, float) ):
raise ValueError("maclaurin_sin() requires either an int or float for theta" )
if not isinstance(__lowerCamelCase , __lowerCamelCase ) or accuracy <= 0:
raise ValueError("maclaurin_sin() requires a positive int for accuracy" )
_a = float(__lowerCamelCase )
_a = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(__lowerCamelCase ) )
def __UpperCamelCase ( __lowerCamelCase : float , __lowerCamelCase : int = 30 ) -> float:
'''simple docstring'''
if not isinstance(__lowerCamelCase , (int, float) ):
raise ValueError("maclaurin_cos() requires either an int or float for theta" )
if not isinstance(__lowerCamelCase , __lowerCamelCase ) or accuracy <= 0:
raise ValueError("maclaurin_cos() requires a positive int for accuracy" )
_a = float(__lowerCamelCase )
_a = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(__lowerCamelCase ) )
# Demo/self-check entry point.
# NOTE(review): `maclaurin_sin`/`maclaurin_cos` are not defined under these
# names in this file (both definitions above are named `__UpperCamelCase`),
# so this block raises NameError as written; confirm the intended names.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
| 276 | 0 |
'''simple docstring'''
import numpy as np
import qiskit
def A__ ( key_len : int = 8 , seed : int | None = None ):
    """Simulate the BB84 quantum key distribution protocol and return a key.

    Parameter names restored (the scrambled original declared both with one
    name, a SyntaxError) and the per-qubit gate loops fixed: gates must be
    applied to `index`, not to a placeholder argument.

    Args:
        key_len: desired key length in bits.
        seed: RNG / simulator seed for reproducibility.
    """
    rng = np.random.default_rng(seed=seed )
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2 , size=num_qubits )
    # The set of states Alice will prepare.
    alice_state = rng.integers(2 , size=num_qubits )
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2 , size=num_qubits )

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits , name="""BB84""" )

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state ):
        if alice_state[index] == 1:
            bbaa_circ.x(index )
        if alice_basis[index] == 1:
            bbaa_circ.h(index )
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis ):
        if bob_basis[index] == 1:
            bbaa_circ.h(index )

    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("""aer_simulator""" )
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ , sim , shots=1 , seed_simulator=seed )
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = """""".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis , bob_basis , result )
            if alice_basis_bit == bob_basis_bit
        ] )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key ) >= key_len else gen_key.ljust(key_len , """0""" )
    return key
# Demo entry point.
# NOTE(review): `bbaa` is not defined in this file (the protocol function
# above is named `A__`), so this print raises NameError as written; confirm
# the intended function name.
if __name__ == "__main__":
    print(F'The generated key is : {bbaa(8, seed=0)}')
    from doctest import testmod
    testmod()
| 50 |
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = 0
while b > 0:
if b & 1:
__UpperCAmelCase = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 303 | 0 |
'''simple docstring'''
def a ( UpperCamelCase_ : int ) -> int:
    """Return the UpperCamelCase_-th entry of the sequence built from [0, 1]
    by successive sums (so a(1) == 0, a(2) == 1, a(10) == 55).

    Robustness fix: non-int or n <= 1 input now uniformly returns 0; the
    original indexed `sequence[n]` for n <= 0, returning 1 for negative n.
    """
    if not isinstance(UpperCamelCase_ , UpperCamelCase_.__class__ if False else int ) or UpperCamelCase_ <= 1:
        return 0
    if UpperCamelCase_ == 2:
        return 1
    sequence = [0, 1]
    for i in range(2 , UpperCamelCase_ + 1 ):
        sequence.append(sequence[i - 1] + sequence[i - 2] )
    return sequence[UpperCamelCase_]
def a ( UpperCamelCase_ : int ) -> int:
    """Return the first sequence index whose Fibonacci value has
    `UpperCamelCase_` decimal digits.

    The scrambled original collapsed `digits` and `index` into one local (so
    `digits` was read before assignment) and carried a `Tuple` annotation that
    would raise NameError since `typing` is not imported here.

    NOTE(review): `fibonacci` is not defined under that name in this file
    (the definition above is named `a`); confirm the intended callee.
    """
    digits = 0
    index = 2
    while digits < UpperCamelCase_:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def a ( UpperCamelCase_ : int = 1000 ) -> int:
    """Project Euler 25 entry point: index of the first Fibonacci number with
    `UpperCamelCase_` digits.

    The original `Optional[Any]` annotation would raise NameError at
    definition time (typing is not imported in this chunk); corrected to int.

    NOTE(review): `fibonacci_digits_index` is not defined under that name in
    this file; confirm the intended callee.
    """
    return fibonacci_digits_index(UpperCamelCase_ )
# Read the target digit count from stdin and print the answer.
# NOTE(review): `solution` is not defined under that name in this file (the
# entry point above is named `a`), so this raises NameError as written.
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 705 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def a ( UpperCamelCase_ : List[str] ) -> Any:
    """Convert a fine-pruned model checkpoint into a standalone ("bertarized")
    dense checkpoint by materializing each pruning method's binary masks.

    NOTE(review): identifiers are mangled — the parameter is ``UpperCamelCase_``
    but the body reads ``args``, and every local assignment target was collapsed
    to ``snake_case__`` while later lines read the original names
    (``pruning_method``, ``model_name_or_path``, ``tensor * mask``, ...).  As
    written this raises NameError; restore from the upstream movement-pruning
    ``bertarize.py`` before use.
    """
    # Unpack CLI options (mangled: targets should be pruning_method/threshold/...).
    snake_case__ =args.pruning_method
    snake_case__ =args.threshold
    snake_case__ =args.model_name_or_path.rstrip('/' )
    snake_case__ =args.target_model_path
    print(f"""Load fine-pruned model from {model_name_or_path}""" )
    # Raw state dict of the fine-pruned model.
    snake_case__ =torch.load(os.path.join(UpperCamelCase_ , 'pytorch_model.bin' ) )
    # Accumulates the pruned (masked) tensors to be saved.
    snake_case__ ={}
    for name, tensor in model.items():
        # Layers that are never pruned are copied through unchanged.
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            snake_case__ =tensor
            print(f"""Copied layer {name}""" )
        elif "classifier" in name or "qa_output" in name:
            snake_case__ =tensor
            print(f"""Copied layer {name}""" )
        elif "bias" in name:
            snake_case__ =tensor
            print(f"""Copied layer {name}""" )
        else:
            # Weight matrices: apply the mask produced by the chosen binarizer.
            if pruning_method == "magnitude":
                snake_case__ =MagnitudeBinarizer.apply(inputs=UpperCamelCase_ , threshold=UpperCamelCase_ )
                snake_case__ =tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                # Look up the learned mask scores stored next to this weight.
                snake_case__ =name[:-6]
                snake_case__ =model[f"""{prefix_}mask_scores"""]
                snake_case__ =TopKBinarizer.apply(UpperCamelCase_ , UpperCamelCase_ )
                snake_case__ =tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                snake_case__ =name[:-6]
                snake_case__ =model[f"""{prefix_}mask_scores"""]
                snake_case__ =ThresholdBinarizer.apply(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
                snake_case__ =tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                snake_case__ =name[:-6]
                snake_case__ =model[f"""{prefix_}mask_scores"""]
                # Hard-concrete stretch interval (l, r) used by L0 regularization.
                snake_case__ , snake_case__ =-0.1, 1.1
                snake_case__ =torch.sigmoid(UpperCamelCase_ )
                snake_case__ =s * (r - l) + l
                snake_case__ =s_bar.clamp(min=0.0 , max=1.0 )
                snake_case__ =tensor * mask
                print(f"""Pruned layer {name}""" )
            else:
                raise ValueError('Unknown pruning method' )
    # Default output folder: sibling directory prefixed with "bertarized_".
    if target_model_path is None:
        snake_case__ =os.path.join(
            os.path.dirname(UpperCamelCase_ ) , f"""bertarized_{os.path.basename(UpperCamelCase_ )}""" )
    if not os.path.isdir(UpperCamelCase_ ):
        # Copy config/tokenizer files alongside the new weights.
        shutil.copytree(UpperCamelCase_ , UpperCamelCase_ )
        print(f"""\nCreated folder {target_model_path}""" )
    torch.save(UpperCamelCase_ , os.path.join(UpperCamelCase_ , 'pytorch_model.bin' ) )
    print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
    # CLI for the bertarize script.
    # NOTE(review): mangled — both assignments target SCREAMING_SNAKE_CASE__ but
    # later lines read `parser` / `args`, and `main` is not defined under that
    # name in this file. Restore identifiers before running.
    SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser()
    parser.add_argument(
        '''--pruning_method''',
        choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
        type=str,
        required=True,
        help=(
            '''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
            ''' sigmoied_threshold = Soft movement pruning)'''
        ),
    )
    parser.add_argument(
        '''--threshold''',
        type=float,
        required=False,
        help=(
            '''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
            '''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
            '''Not needed for `l0`'''
        ),
    )
    parser.add_argument(
        '''--model_name_or_path''',
        type=str,
        required=True,
        help='''Folder containing the model that was previously fine-pruned''',
    )
    parser.add_argument(
        '''--target_model_path''',
        default=None,
        type=str,
        required=False,
        help='''Folder containing the model that was previously fine-pruned''',
    )
    SCREAMING_SNAKE_CASE__ : Any = parser.parse_args()
    main(args)
| 581 | 0 |
from maths.prime_factors import prime_factors
def snake_case_(SCREAMING_SNAKE_CASE_) -> int:
    """Liouville lambda: -1 if the input has an odd number of prime factors
    (with multiplicity), else 1.

    Raises TypeError for non-int input and ValueError for values < 1.
    Fixes from the mangled original: the type check was
    ``isinstance(x, <this function>)`` (always a TypeError) and the error
    message referenced an undefined ``number``. The wrong ``List[str]`` return
    annotation is corrected to ``int``.
    """
    number = SCREAMING_SNAKE_CASE_  # the original body referenced this undefined name
    if not isinstance(number, int):
        lowercase__ = f"Input value of [number={number}] must be an integer"
        raise TypeError(lowercase__)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    # Parity of the prime-factor count decides the sign.
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    # Fix: the original final line had trailing junk ("| 397 |") fused onto it,
    # which made the file a SyntaxError.
    import doctest

    doctest.testmod()
from __future__ import annotations
import bisect
def SCREAMING_SNAKE_CASE(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Leftmost insertion point for ``item`` in ``sorted_collection[lo:hi]``.

    ``hi < 0`` (the default) means "up to the end of the list".
    Fixes from the mangled original: all four parameters were declared with the
    same name (a SyntaxError); names are restored from the body, which already
    read ``sorted_collection``/``item``/``lo``/``hi``.
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2  # overflow-safe midpoint idiom
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def SCREAMING_SNAKE_CASE(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Rightmost insertion point for ``item`` in ``sorted_collection[lo:hi]``.

    ``hi < 0`` (the default) means "up to the end of the list".
    Fixes from the mangled original: all four parameters were declared with the
    same name (a SyntaxError); names are restored from the body.
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:  # <= keeps equal items to the left
            lo = mid + 1
        else:
            hi = mid
    return lo
def SCREAMING_SNAKE_CASE(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert ``item`` into ``sorted_collection`` (in place) at its leftmost
    sorted position within ``[lo, hi)``.

    Fixes from the mangled original: duplicate parameter names (a SyntaxError).
    NOTE(review): calls a sibling ``bisect_left`` whose ``hi = -1`` default
    differs from the stdlib's; in this mangled file that sibling is not actually
    bound to the name ``bisect_left``.
    """
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)
def SCREAMING_SNAKE_CASE(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert ``item`` into ``sorted_collection`` (in place) at its rightmost
    sorted position within ``[lo, hi)``.

    Fixes from the mangled original: duplicate parameter names (a SyntaxError).
    NOTE(review): calls a sibling ``bisect_right`` that this mangled file does
    not actually define under that name.
    """
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)
def SCREAMING_SNAKE_CASE(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; return an index of ``item`` or None if absent.

    Fixes from the mangled original: both parameters were declared with the same
    name (a SyntaxError); names are restored from the body.
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def SCREAMING_SNAKE_CASE(sorted_collection: list[int], item: int) -> int | None:
    """Binary search via the standard-library ``bisect``; return index or None.

    Fixes from the mangled original: both parameters were declared with the same
    name (a SyntaxError); names are restored from the body.
    """
    index = bisect.bisect_left(sorted_collection, item)
    # bisect_left gives the insertion point; confirm the item is actually there.
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def SCREAMING_SNAKE_CASE(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Binary search within ``sorted_collection[left:right + 1]``; return an
    index of ``item`` or None if absent.

    Fixes from the mangled original: all four parameters shared one name (a
    SyntaxError), and the recursive calls went through the name
    ``binary_search_by_recursion``, which this file never defines (NameError).
    Rewritten iteratively — same halving logic, no self-reference needed.
    """
    while left <= right:
        midpoint = left + (right - left) // 2
        if sorted_collection[midpoint] == item:
            return midpoint
        if sorted_collection[midpoint] > item:
            right = midpoint - 1  # search the left half
        else:
            left = midpoint + 1  # search the right half
    return None
if __name__ == "__main__":
    # Interactive demo of binary search.
    # NOTE(review): mangled — all four assignments target __lowerCamelCase while
    # later lines read `user_input`/`collection`/`target`/`result`, and
    # `binary_search` is not bound under that name in this file.
    __lowerCamelCase : List[Any] = input("""Enter numbers separated by comma:\n""").strip()
    __lowerCamelCase : str = sorted(int(item) for item in user_input.split(""","""))
    __lowerCamelCase : List[Any] = int(input("""Enter a single number to be found in the list:\n"""))
    __lowerCamelCase : Tuple = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 297 | 0 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def _lowerCamelCase( a ):
    """Seed every RNG the library relies on: Python's `random`, NumPy, and
    torch (CPU plus all CUDA devices) — for reproducible runs."""
    seeders = (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all)
    for seed_fn in seeders:
        # torch.cuda.manual_seed_all is safe to call even when CUDA is unavailable.
        seed_fn(a)
class snake_case__ :
    """Exponential moving average (EMA) of model parameters (diffusers-style).

    NOTE(review): heavily mangled by obfuscation and NOT runnable as-is:
    several signatures declare the same parameter name multiple times (a
    SyntaxError), every ``self.<attr> =`` target was collapsed to ``__a``, and
    bodies read the pre-obfuscation names (``parameters``, ``decay``,
    ``state_dict``, ...). Restore from the upstream ``EMAModel`` before use.
    """

    def __init__( self , lowerCamelCase , lowerCamelCase = 0.9999 , lowerCamelCase = 0.0 , lowerCamelCase = 0 , lowerCamelCase = False , lowerCamelCase = 1.0 , lowerCamelCase = 2 / 3 , lowerCamelCase = None , lowerCamelCase = None , **lowerCamelCase , ):
        # Deprecated path: an nn.Module was passed instead of its parameters.
        if isinstance(lowerCamelCase , torch.nn.Module ):
            __a = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , lowerCamelCase , standard_warn=lowerCamelCase , )
            __a = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            __a = True
        # Deprecated kwargs: max_value/min_value renamed to decay/min_decay.
        if kwargs.get("max_value" , lowerCamelCase ) is not None:
            __a = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value" , "1.0.0" , lowerCamelCase , standard_warn=lowerCamelCase )
            __a = kwargs["max_value"]
        if kwargs.get("min_value" , lowerCamelCase ) is not None:
            __a = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value" , "1.0.0" , lowerCamelCase , standard_warn=lowerCamelCase )
            __a = kwargs["min_value"]
        __a = list(lowerCamelCase )
        # Shadow copies that accumulate the moving average.
        __a = [p.clone().detach() for p in parameters]
        # Deprecated kwarg: device — use .to() instead.
        if kwargs.get("device" , lowerCamelCase ) is not None:
            __a = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device" , "1.0.0" , lowerCamelCase , standard_warn=lowerCamelCase )
            self.to(device=kwargs["device"] )
        __a = None
        __a = decay
        __a = min_decay
        __a = update_after_step
        __a = use_ema_warmup
        __a = inv_gamma
        __a = power
        __a = 0
        __a = None # set in `step()`
        __a = model_cls
        __a = model_config

    @classmethod
    def a__ ( cls , lowerCamelCase , lowerCamelCase ):
        """Build an EMA wrapper from a pretrained checkpoint path and model class."""
        __a , __a = model_cls.load_config(lowerCamelCase , return_unused_kwargs=lowerCamelCase )
        __a = model_cls.from_pretrained(lowerCamelCase )
        __a = cls(model.parameters() , model_cls=lowerCamelCase , model_config=model.config )
        ema_model.load_state_dict(lowerCamelCase )
        return ema_model

    def a__ ( self , lowerCamelCase ):
        """Save the EMA weights as a pretrained model at the given path."""
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
        __a = self.model_cls.from_config(self.model_config )
        __a = self.state_dict()
        # shadow_params are copied into the model directly, not serialized here.
        state_dict.pop("shadow_params" , lowerCamelCase )
        model.register_to_config(**lowerCamelCase )
        self.copy_to(model.parameters() )
        model.save_pretrained(lowerCamelCase )

    def a__ ( self , lowerCamelCase ):
        """Compute the decay factor for the current optimization step."""
        __a = max(0 , optimization_step - self.update_after_step - 1 )
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            # Warmup schedule: 1 - (1 + step/inv_gamma)^(-power)
            __a = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            __a = (1 + step) / (10 + step)
        __a = min(lowerCamelCase , self.decay )
        # make sure decay is not smaller than min_decay
        __a = max(lowerCamelCase , self.min_decay )
        return cur_decay_value

    @torch.no_grad()
    def a__ ( self , lowerCamelCase ):
        """Update the shadow parameters one EMA step toward the live parameters."""
        if isinstance(lowerCamelCase , torch.nn.Module ):
            __a = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , lowerCamelCase , standard_warn=lowerCamelCase , )
            __a = parameters.parameters()
        __a = list(lowerCamelCase )
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        __a = self.get_decay(self.optimization_step )
        __a = decay
        __a = 1 - decay
        __a = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params , lowerCamelCase ):
            # Under DeepSpeed ZeRO-3 the full parameter must be gathered first.
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
                __a = deepspeed.zero.GatheredParameters(lowerCamelCase , modifier_rank=lowerCamelCase )
            with context_manager():
                if param.requires_grad:
                    # shadow <- shadow - (1 - decay) * (shadow - param)
                    s_param.sub_(one_minus_decay * (s_param - param) )
                else:
                    s_param.copy_(lowerCamelCase )

    def a__ ( self , lowerCamelCase ):
        """Copy the EMA (shadow) values into the given live parameters."""
        __a = list(lowerCamelCase )
        for s_param, param in zip(self.shadow_params , lowerCamelCase ):
            param.data.copy_(s_param.to(param.device ).data )

    def a__ ( self , lowerCamelCase=None , lowerCamelCase=None ):
        """Move/cast the shadow parameters (non-float tensors keep their dtype)."""
        __a = [
            p.to(device=lowerCamelCase , dtype=lowerCamelCase ) if p.is_floating_point() else p.to(device=lowerCamelCase )
            for p in self.shadow_params
        ]

    def a__ ( self ):
        """Serialize the EMA configuration and shadow parameters."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def a__ ( self , lowerCamelCase ):
        """Stash a CPU copy of the given parameters for a later restore()."""
        __a = [param.detach().cpu().clone() for param in parameters]

    def a__ ( self , lowerCamelCase ):
        """Restore parameters previously saved with store()."""
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
        for c_param, param in zip(self.temp_stored_params , lowerCamelCase ):
            param.data.copy_(c_param.data )
        # Better memory-wise.
        __a = None

    def a__ ( self , lowerCamelCase ):
        """Load EMA state from a state dict, validating every field."""
        __a = copy.deepcopy(lowerCamelCase )
        __a = state_dict.get("decay" , self.decay )
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1" )
        __a = state_dict.get("min_decay" , self.min_decay )
        if not isinstance(self.min_decay , lowerCamelCase ):
            raise ValueError("Invalid min_decay" )
        __a = state_dict.get("optimization_step" , self.optimization_step )
        if not isinstance(self.optimization_step , lowerCamelCase ):
            raise ValueError("Invalid optimization_step" )
        __a = state_dict.get("update_after_step" , self.update_after_step )
        if not isinstance(self.update_after_step , lowerCamelCase ):
            raise ValueError("Invalid update_after_step" )
        __a = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
        if not isinstance(self.use_ema_warmup , lowerCamelCase ):
            raise ValueError("Invalid use_ema_warmup" )
        __a = state_dict.get("inv_gamma" , self.inv_gamma )
        if not isinstance(self.inv_gamma , (float, int) ):
            raise ValueError("Invalid inv_gamma" )
        __a = state_dict.get("power" , self.power )
        if not isinstance(self.power , (float, int) ):
            raise ValueError("Invalid power" )
        __a = state_dict.get("shadow_params" , lowerCamelCase )
        if shadow_params is not None:
            __a = shadow_params
            if not isinstance(self.shadow_params , lowerCamelCase ):
                raise ValueError("shadow_params must be a list" )
            if not all(isinstance(lowerCamelCase , torch.Tensor ) for p in self.shadow_params ):
                raise ValueError("shadow_params must all be Tensors" )
| 701 | """simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class snake_case__ :
    """A batched, differentiable pinhole/projective camera (shap-e style).

    NOTE(review): mangled — all nine fields share the single name
    ``_snake_case`` (so the dataclass keeps only the last), method locals were
    collapsed to ``__a`` while later lines read the original names
    (``coords``, ``fracs``, ``directions``, ...), and the last method declares
    duplicate parameters (a SyntaxError). Restore from upstream before use.
    """

    _snake_case : torch.Tensor # [batch_size x 3]   (camera origin per batch element)
    _snake_case : torch.Tensor # [batch_size x 3]   (x basis vector)
    _snake_case : torch.Tensor # [batch_size x 3]   (y basis vector)
    _snake_case : torch.Tensor # [batch_size x 3]   (z / view direction)
    _snake_case : int   # width (pixels) — presumed; mangled field name
    _snake_case : int   # height (pixels) — presumed
    _snake_case : float # x field of view (radians) — presumed
    _snake_case : float # y field of view (radians) — presumed
    _snake_case : Tuple[int]  # logical batch shape — presumed

    def a__ ( self ):
        """Sanity-check that all basis vectors/origins are [batch, 3] tensors."""
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2

    def a__ ( self ):
        """Image resolution as a float tensor [width, height]."""
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )

    def a__ ( self ):
        """Field of view as a float tensor [x_fov, y_fov]."""
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )

    def a__ ( self ):
        """Integer pixel coordinates [H*W, 2] in (column, row) order."""
        __a = torch.arange(self.height * self.width )
        __a = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(lowerCamelCase , self.width , rounding_mode="trunc" ),
            ] , axis=1 , )
        return coords

    @property
    def a__ ( self ):
        """All camera rays for the batch, shaped [batch, inner*H*W, 2, 3]."""
        __a , *__a = self.shape
        __a = int(np.prod(lowerCamelCase ) )
        __a = self.get_image_coords()
        __a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
        __a = self.get_camera_rays(lowerCamelCase )
        __a = rays.view(lowerCamelCase , inner_batch_size * self.height * self.width , 2 , 3 )
        return rays

    def a__ ( self , lowerCamelCase ):
        """Map pixel coordinates to (origin, direction) ray pairs."""
        __a , *__a , __a = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        __a = coords.view(lowerCamelCase , -1 , 2 )
        __a = self.resolution()
        __a = self.fov()
        # Normalize pixel coords to [-1, 1], then scale by tan(fov/2).
        __a = (flat.float() / (res - 1)) * 2 - 1
        __a = fracs * torch.tan(fov / 2 )
        __a = fracs.view(lowerCamelCase , -1 , 2 )
        # Ray direction = view axis plus fractional offsets along x/y bases.
        __a = (
            self.z.view(lowerCamelCase , 1 , 3 )
            + self.x.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, :1]
            + self.y.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, 1:]
        )
        __a = directions / directions.norm(dim=-1 , keepdim=lowerCamelCase )
        # Pair each direction with the (broadcast) camera origin.
        __a = torch.stack(
            [
                torch.broadcast_to(self.origin.view(lowerCamelCase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
                directions,
            ] , dim=2 , )
        return rays.view(lowerCamelCase , *lowerCamelCase , 2 , 3 )

    def a__ ( self , lowerCamelCase , lowerCamelCase ):
        """Return a camera with a new image size (same aspect ratio required).

        NOTE(review): duplicate parameter names here are a SyntaxError.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin , x=self.x , y=self.y , z=self.z , width=lowerCamelCase , height=lowerCamelCase , x_fov=self.x_fov , y_fov=self.y_fov , )
def _lowerCamelCase( a ):
    """Build a ring of 20 cameras panning around the origin (shap-e style).

    NOTE(review): mangled — every local was collapsed to ``__a`` and most call
    sites pass the function parameter ``a`` where the original used ``theta``,
    ``origin``, ``x``, ``z``, etc.; ``z``/``origins``/``xs``/``ys``/``zs`` are
    read but never bound. Not runnable as-is; restore from upstream.
    """
    __a = []   # origins
    __a = []   # x basis vectors
    __a = []   # y basis vectors
    __a = []   # z / view directions
    for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
        # View direction pointing slightly downward, normalized.
        __a = np.array([np.sin(a ), np.cos(a ), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        # Camera sits 4 units back along the view direction.
        __a = -z * 4
        __a = np.array([np.cos(a ), -np.sin(a ), 0.0] )
        __a = np.cross(a , a )
        origins.append(a )
        xs.append(a )
        ys.append(a )
        zs.append(a )
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(a , axis=0 ) ).float() , x=torch.from_numpy(np.stack(a , axis=0 ) ).float() , y=torch.from_numpy(np.stack(a , axis=0 ) ).float() , z=torch.from_numpy(np.stack(a , axis=0 ) ).float() , width=a , height=a , x_fov=0.7 , y_fov=0.7 , shape=(1, len(a )) , )
| 67 | 0 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE(__snake_case: list) -> list:
    """Sort ``__snake_case`` in place with selection sort and return it.

    Fixes from the mangled original: every assignment target was collapsed to
    ``_A`` while the body read ``length``/``collection``/``least`` (NameError),
    and the swap's left-hand side was destroyed. Locals restored from the names
    the body already used.
    """
    collection = __snake_case
    length = len(collection)
    for i in range(length - 1):
        # Find the smallest remaining element...
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        # ...and swap it into position i.
        if least != i:
            collection[i], collection[least] = (collection[least], collection[i])
    return collection
if __name__ == "__main__":
    # Interactive demo of the selection sort above.
    # NOTE(review): mangled — assignments target _UpperCAmelCase while later
    # lines read `user_input`/`unsorted`, and `selection_sort` is not bound
    # under that name in this file.
    _UpperCAmelCase : Optional[Any] = input('''Enter numbers separated by a comma:\n''').strip()
    _UpperCAmelCase : List[str] = [int(item) for item in user_input.split(''',''')]
    print(selection_sort(unsorted))
| 107 | '''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class lowercase_ ( unittest.TestCase ):
    """Unit tests for ``is_safetensors_compatible``: a repo is compatible when
    every ``.bin`` weight file has a matching ``.safetensors`` counterpart
    (optionally for a given variant such as ``fp16``).

    NOTE(review): mangled — each assertion passes ``UpperCamelCase__`` where the
    original passed the local filename list and variant (both assigned here to
    ``_A``), so the methods raise NameError as written. The fixture lists
    themselves are intact.
    """

    def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
        # Full pipeline: every .bin has a .safetensors twin -> compatible.
        _A = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(UpperCamelCase__ ) )

    def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
        # UNet-only repo with both formats -> compatible.
        _A = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(UpperCamelCase__ ) )

    def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
        # UNet .safetensors missing -> NOT compatible.
        _A = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(UpperCamelCase__ ) )

    def __UpperCAmelCase ( self : int ) -> Dict:
        # Text-encoder-only repo with both formats -> compatible.
        _A = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(UpperCamelCase__ ) )

    def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
        # Text-encoder .safetensors missing -> NOT compatible.
        _A = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            # Removed: 'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(UpperCamelCase__ ) )

    def __UpperCAmelCase ( self : str ) -> Dict:
        # fp16 variant: all .fp16.bin files have .fp16.safetensors twins.
        _A = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        _A = 'fp16'
        self.assertTrue(is_safetensors_compatible(UpperCamelCase__, variant=UpperCamelCase__ ) )

    def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
        # fp16 variant, UNet only -> compatible.
        _A = [
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        _A = 'fp16'
        self.assertTrue(is_safetensors_compatible(UpperCamelCase__, variant=UpperCamelCase__ ) )

    def __UpperCAmelCase ( self : List[str] ) -> List[str]:
        # pass variant but use the non-variant filenames
        _A = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        _A = 'fp16'
        self.assertTrue(is_safetensors_compatible(UpperCamelCase__, variant=UpperCamelCase__ ) )

    def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
        # fp16 variant with UNet .fp16.safetensors missing -> NOT compatible.
        _A = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        _A = 'fp16'
        self.assertFalse(is_safetensors_compatible(UpperCamelCase__, variant=UpperCamelCase__ ) )

    def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
        # fp16 variant, text-encoder only -> compatible.
        _A = [
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
        ]
        _A = 'fp16'
        self.assertTrue(is_safetensors_compatible(UpperCamelCase__, variant=UpperCamelCase__ ) )

    def __UpperCAmelCase ( self : Tuple ) -> str:
        # pass variant but use the non-variant filenames
        _A = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        _A = 'fp16'
        self.assertTrue(is_safetensors_compatible(UpperCamelCase__, variant=UpperCamelCase__ ) )

    def __UpperCAmelCase ( self : List[Any] ) -> int:
        # fp16 variant with text-encoder .fp16.safetensors missing -> NOT compatible.
        _A = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            # 'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        _A = 'fp16'
        self.assertFalse(is_safetensors_compatible(UpperCamelCase__, variant=UpperCamelCase__ ) )
| 107 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class SCREAMING_SNAKE_CASE__:
    """A binary-tree node.

    NOTE(review): field names restored from how the rest of this file uses the
    type (``node.data``, ``node.left``, ``node.right``, ``Node(1)``); the
    mangled original re-assigned one un-annotated class attribute three times,
    leaving the dataclass with no fields at all (so it could not be
    constructed with arguments).
    """

    data: Any
    left: SCREAMING_SNAKE_CASE__ | None = None
    right: SCREAMING_SNAKE_CASE__ | None = None
def UpperCamelCase__( )->Node | None:
    """Build a small fixed 5-node test tree rooted at 1.

    NOTE(review): mangled — every node is assigned to the same throwaway
    ``A__`` (so the tree is never linked) and the returned name ``tree`` is
    undefined; ``Node`` is also not bound under that name in this file.
    """
    A__ = Node(1 )
    A__ = Node(2 )
    A__ = Node(3 )
    A__ = Node(4 )
    A__ = Node(5 )
    return tree
def UpperCamelCase__( UpperCamelCase__ : Node | None )->list[int]:
    """Pre-order traversal (root, left, right).

    NOTE(review): mangled — the body reads ``root`` (the parameter is
    ``UpperCamelCase__``) and recurses through ``preorder``, which this file
    never defines under that name.
    """
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def UpperCamelCase__( UpperCamelCase__ : Node | None )->list[int]:
    """Post-order traversal (left, right, root).

    NOTE(review): mangled — reads undefined ``root`` and recurses through the
    unbound name ``postorder``.
    """
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def UpperCamelCase__( UpperCamelCase__ : Node | None )->list[int]:
    """In-order traversal (left, root, right).

    NOTE(review): mangled — reads undefined ``root`` and recurses through the
    unbound name ``inorder``.
    """
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def UpperCamelCase__( UpperCamelCase__ : Node | None )->int:
    """Height of the tree (0 for an empty tree).

    NOTE(review): mangled — reads undefined ``root`` and recurses through the
    unbound name ``height``.
    """
    return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def UpperCamelCase__( UpperCamelCase__ : Node | None )->Sequence[Node | None]:
    """Breadth-first (level-order) traversal collecting node data.

    NOTE(review): mangled — locals were collapsed to ``A__`` while the body
    reads ``output``/``process_queue``/``node`` and the undefined ``root``.
    """
    A__ = []
    if root is None:
        return output
    A__ = deque([root] )
    while process_queue:
        A__ = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def UpperCamelCase__( UpperCamelCase__ : Node | None , UpperCamelCase__ : int )->Sequence[Node | None]:
    """Collect the data of one tree level, scanning left to right.

    NOTE(review): mangled — both outer parameters and both inner-function
    parameters share a name (the inner ``def`` is a SyntaxError), and the
    bodies read the undefined ``root``/``level``/``output``.
    """
    A__ = []

    def populate_output(UpperCamelCase__ : Node | None , UpperCamelCase__ : int ) -> None:
        # Depth-first descent, emitting data only when the target level is hit.
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )

    populate_output(UpperCamelCase__ , UpperCamelCase__ )
    return output
def UpperCamelCase__( UpperCamelCase__ : Node | None , UpperCamelCase__ : int )->Sequence[Node | None]:
    """Collect the data of one tree level, scanning right to left.

    NOTE(review): same mangling as the left-to-right variant above — duplicate
    parameter names (SyntaxError in the inner ``def``) and undefined locals.
    """
    A__ = []

    def populate_output(UpperCamelCase__ : Node | None , UpperCamelCase__ : int ) -> None:
        # Mirror of the left-to-right walk: visit right subtree first.
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )

    populate_output(UpperCamelCase__ , UpperCamelCase__ )
    return output
def UpperCamelCase__( UpperCamelCase__ : Node | None )->Sequence[Node | None] | list[Any]:
    """ZigZag traversal: alternate left-to-right and right-to-left per level.

    NOTE(review): mangled — locals collapsed to ``A__`` while the body reads
    ``root``/``output``/``flag``/``height_tree``, and the per-level helpers are
    not bound under the names called here.
    """
    if root is None:
        return []
    A__ = []
    A__ = 0
    A__ = height(UpperCamelCase__ )
    for h in range(1 , height_tree + 1 ):
        # flag == 0: left-to-right; flag == 1: right-to-left.
        if not flag:
            output.append(get_nodes_from_left_to_right(UpperCamelCase__ , UpperCamelCase__ ) )
            A__ = 1
        else:
            output.append(get_nodes_from_right_to_left(UpperCamelCase__ , UpperCamelCase__ ) )
            A__ = 0
    return output
def UpperCamelCase__( )->None: # Main function for testing.
    """Demo: build the fixed tree and print every traversal.

    NOTE(review): mangled — calls ``make_tree``/``inorder``/``preorder``/
    ``postorder``/``height``/``level_order``/``zigzag`` etc., none of which
    are bound under those names in this file, and passes ``UpperCamelCase__``
    (the function itself) where the tree root was intended.
    """
    A__ = make_tree()
    print(f"In-order Traversal: {inorder(UpperCamelCase__ )}" )
    print(f"Pre-order Traversal: {preorder(UpperCamelCase__ )}" )
    print(f"Post-order Traversal: {postorder(UpperCamelCase__ )}" , '''\n''' )
    print(f"Height of Tree: {height(UpperCamelCase__ )}" , '''\n''' )
    print('''Complete Level Order Traversal: ''' )
    print(level_order(UpperCamelCase__ ) , '''\n''' )
    print('''Level-wise order Traversal: ''' )
    for level in range(1 , height(UpperCamelCase__ ) + 1 ):
        print(f"Level {level}:" , get_nodes_from_left_to_right(UpperCamelCase__ , level=UpperCamelCase__ ) )
    print('''\nZigZag order Traversal: ''' )
    print(zigzag(UpperCamelCase__ ) )
if __name__ == "__main__":
    # Run doctests, then the interactive demo.
    # NOTE(review): `main` is not bound under that name in this mangled file.
    import doctest

    doctest.testmod()
    main()
| 212 |
import math
def UpperCamelCase__(UpperCamelCase__: int) -> bool:
    """Trial-division primality test for non-negative integers.

    Fixes from the mangled original: the guard was ``isinstance(x, x)``
    (a TypeError) and the body read an undefined ``number``. Behavior is
    otherwise unchanged, including the assert-based input validation.
    """
    number = UpperCamelCase__  # restore the local the body was written against
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    # Only odd divisors up to sqrt(number) need checking.
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)
def UpperCamelCase__( UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any]=1 , **UpperCamelCase__ : int )->List[Any]:
    """Find the next prime at/after ``factor * value`` (searching downward when
    ``desc=True`` is passed), recursing once if the start value is itself prime.

    NOTE(review): mangled and not runnable — three parameters share one name
    (a SyntaxError), both locals were collapsed to ``A__`` while the body reads
    ``value``/``first_value_val``/``kwargs``, and ``is_prime``/``next_prime``
    are not bound under those names in this file.
    """
    A__ = factor * value
    A__ = value
    while not is_prime(UpperCamelCase__ ):
        # Step up by default, or down when kwargs["desc"] is True.
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1 , **UpperCamelCase__ )
    return value
| 212 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class lowercase__ ( snake_case__ ):
    """Output container for the 1-D UNet below.

    NOTE(review): mangled — the single field's type annotation was replaced by
    the bare literal ``42``, so as written this is a plain class attribute and
    the dataclass has no fields. The model's forward() constructs it as
    ``UNetaDOutput(sample=...)``, so the field was presumably
    ``sample: torch.FloatTensor`` — restore from upstream before use.
    """

    _UpperCAmelCase = 42
class lowercase__ ( snake_case__, snake_case__ ):
    """A 1-D UNet (diffusers-style): time embedding, down blocks, mid block,
    up blocks, and an optional output head.

    NOTE(review): mangled and NOT runnable as written — the base class is
    listed twice (duplicate bases raise TypeError), every ``__init__``
    parameter is declared ``snake_case`` (duplicate argument names are a
    SyntaxError), and assignment targets were collapsed to ``_UpperCAmelCase``
    while later lines read the original names. Restore from upstream
    ``UNet1DModel`` before use.
    """

    @register_to_config
    def __init__( self , snake_case = 65536 , snake_case = None , snake_case = 2 , snake_case = 2 , snake_case = 0 , snake_case = "fourier" , snake_case = True , snake_case = False , snake_case = 0.0 , snake_case = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , snake_case = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , snake_case = "UNetMidBlock1D" , snake_case = None , snake_case = (32, 32, 64) , snake_case = None , snake_case = 8 , snake_case = 1 , snake_case = False , ) -> Any:
        super().__init__()
        _UpperCAmelCase = sample_size

        # time
        if time_embedding_type == "fourier":
            _UpperCAmelCase = GaussianFourierProjection(
                embedding_size=8 , set_W_to_weight=_UpperCAmelCase , log=_UpperCAmelCase , flip_sin_to_cos=_UpperCAmelCase )
            # Fourier projection doubles the channel count.
            _UpperCAmelCase = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            _UpperCAmelCase = Timesteps(
                block_out_channels[0] , flip_sin_to_cos=_UpperCAmelCase , downscale_freq_shift=_UpperCAmelCase )
            _UpperCAmelCase = block_out_channels[0]

        if use_timestep_embedding:
            _UpperCAmelCase = block_out_channels[0] * 4
            _UpperCAmelCase = TimestepEmbedding(
                in_channels=_UpperCAmelCase , time_embed_dim=_UpperCAmelCase , act_fn=_UpperCAmelCase , out_dim=block_out_channels[0] , )

        _UpperCAmelCase = nn.ModuleList([] )
        _UpperCAmelCase = None
        _UpperCAmelCase = nn.ModuleList([] )
        _UpperCAmelCase = None

        # down
        _UpperCAmelCase = in_channels
        for i, down_block_type in enumerate(_UpperCAmelCase ):
            _UpperCAmelCase = output_channel
            _UpperCAmelCase = block_out_channels[i]
            if i == 0:
                # Extra channels (e.g. conditioning) are fed to the first block only.
                input_channel += extra_in_channels
            _UpperCAmelCase = i == len(_UpperCAmelCase ) - 1
            _UpperCAmelCase = get_down_block(
                _UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
            self.down_blocks.append(_UpperCAmelCase )

        # mid
        _UpperCAmelCase = get_mid_block(
            _UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_UpperCAmelCase , add_downsample=_UpperCAmelCase , )

        # up
        _UpperCAmelCase = list(reversed(_UpperCAmelCase ) )
        _UpperCAmelCase = reversed_block_out_channels[0]
        if out_block_type is None:
            _UpperCAmelCase = out_channels
        else:
            _UpperCAmelCase = block_out_channels[0]
        for i, up_block_type in enumerate(_UpperCAmelCase ):
            _UpperCAmelCase = output_channel
            _UpperCAmelCase = (
                reversed_block_out_channels[i + 1] if i < len(_UpperCAmelCase ) - 1 else final_upsample_channels
            )
            _UpperCAmelCase = i == len(_UpperCAmelCase ) - 1
            _UpperCAmelCase = get_up_block(
                _UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
            self.up_blocks.append(_UpperCAmelCase )
            _UpperCAmelCase = output_channel

        # out
        _UpperCAmelCase = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
        _UpperCAmelCase = get_out_block(
            out_block_type=_UpperCAmelCase , num_groups_out=_UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=_UpperCAmelCase , act_fn=_UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , )

    def lowerCamelCase_ ( self , snake_case , snake_case , snake_case = True , ) -> Union[str, Any]:
        """Run the UNet on a sample for a given timestep; returns a tuple when
        ``return_dict`` is falsy, otherwise the output dataclass."""
        # 1. time: normalize the timestep to a 1-D tensor on the right device.
        _UpperCAmelCase = timestep
        if not torch.is_tensor(_UpperCAmelCase ):
            _UpperCAmelCase = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
        elif torch.is_tensor(_UpperCAmelCase ) and len(timesteps.shape ) == 0:
            _UpperCAmelCase = timesteps[None].to(sample.device )

        _UpperCAmelCase = self.time_proj(_UpperCAmelCase )
        if self.config.use_timestep_embedding:
            _UpperCAmelCase = self.time_mlp(_UpperCAmelCase )
        else:
            # Broadcast the raw projection across the sample's length dimension.
            _UpperCAmelCase = timestep_embed[..., None]
            _UpperCAmelCase = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
            _UpperCAmelCase = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )

        # 2. down
        _UpperCAmelCase = ()
        for downsample_block in self.down_blocks:
            _UpperCAmelCase = downsample_block(hidden_states=_UpperCAmelCase , temb=_UpperCAmelCase )
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            _UpperCAmelCase = self.mid_block(_UpperCAmelCase , _UpperCAmelCase )

        # 4. up: consume the skip connections in reverse order.
        for i, upsample_block in enumerate(self.up_blocks ):
            _UpperCAmelCase = down_block_res_samples[-1:]
            _UpperCAmelCase = down_block_res_samples[:-1]
            _UpperCAmelCase = upsample_block(_UpperCAmelCase , res_hidden_states_tuple=_UpperCAmelCase , temb=_UpperCAmelCase )

        # 5. post-process
        if self.out_block:
            _UpperCAmelCase = self.out_block(_UpperCAmelCase , _UpperCAmelCase )

        if not return_dict:
            return (sample,)

        return UNetaDOutput(sample=_UpperCAmelCase )
| 573 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ (unittest.TestCase ):
    """Configuration holder for the MobileViT image-processor tests.

    Stores the knobs the suite needs (batch size, resolutions, resize and
    center-crop settings, channel-order flipping) and serialises them to the
    kwargs dict the image processor is constructed from.

    The original was broken by obfuscation: every ``__init__`` parameter was
    named ``_UpperCAmelCase`` (a duplicate-name SyntaxError) and the values
    were bound to locals instead of ``self`` attributes, so the sibling test
    class's calls to ``prepare_image_processor_dict`` could never work.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        # Fall back to the defaults the shipped MobileViT processor uses.
        self.size = size if size is not None else {'shortest_edge': 20}
        self.crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_center_crop = do_center_crop
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate ``MobileViTImageProcessor``."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
    """Test-suite for ``MobileViTImageProcessor``.

    NOTE(review): obfuscation gave every test method the same name
    ``SCREAMING_SNAKE_CASE__`` (later definitions shadow earlier ones) and
    bound results to ``_lowerCAmelCase`` while subsequent statements read
    names such as ``encoded_images`` / ``image_processor``; restore the
    original identifiers before executing this suite.
    """

    # Class under test (None when the vision dependencies are missing).
    lowerCamelCase : Any = MobileViTImageProcessor if is_vision_available() else None

    def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
        # setUp: build the shared fixture holder used by every test below.
        _lowerCAmelCase :Optional[Any] = MobileViTImageProcessingTester(self )

    @property
    def SCREAMING_SNAKE_CASE__ ( self: str ):
        # Processor kwargs derived from the fixture holder.
        return self.image_processor_tester.prepare_image_processor_dict()

    def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
        # The processor must expose every configuration attribute.
        _lowerCAmelCase :str = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
        self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
        self.assertTrue(hasattr(_UpperCAmelCase , 'do_center_crop' ) )
        self.assertTrue(hasattr(_UpperCAmelCase , 'center_crop' ) )
        self.assertTrue(hasattr(_UpperCAmelCase , 'do_flip_channel_order' ) )

    def SCREAMING_SNAKE_CASE__ ( self: Any ):
        # from_dict honours defaults as well as size/crop_size overrides.
        _lowerCAmelCase :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 20} )
        self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
        _lowerCAmelCase :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'shortest_edge': 42} )
        self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )

    def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
        # Intentionally empty (placeholder inherited from the common mixin).
        pass

    def SCREAMING_SNAKE_CASE__ ( self: int ):
        # PIL inputs: output must be cropped to (crop_h, crop_w), batched or not.
        # Initialize image_processing
        _lowerCAmelCase :Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _lowerCAmelCase :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCAmelCase , Image.Image )
        # Test not batched input
        _lowerCAmelCase :Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        _lowerCAmelCase :str = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
        # Same contract with numpy array inputs.
        # Initialize image_processing
        _lowerCAmelCase :int = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _lowerCAmelCase :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCAmelCase , np.ndarray )
        # Test not batched input
        _lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        _lowerCAmelCase :List[str] = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def SCREAMING_SNAKE_CASE__ ( self: Any ):
        # Same contract with torch tensor inputs.
        # Initialize image_processing
        _lowerCAmelCase :Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _lowerCAmelCase :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
        # Test not batched input
        _lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        _lowerCAmelCase :int = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
'''simple docstring'''
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowercase_ (lowerCamelCase__ ):
    """Levit-specific config tester: asserts the config exposes the fields
    the modeling code depends on."""

    def SCREAMING_SNAKE_CASE ( self : Any ):
        # Build a config instance from the stored kwargs.
        __lowercase = self.config_class(**self.inputs_dict )
        # NOTE(review): ``lowercase__`` is undefined here -- presumably it is
        # the config bound to ``__lowercase`` above (names were collapsed by
        # obfuscation); confirm against the un-obfuscated source.
        self.parent.assertTrue(hasattr(lowercase__ ,'''hidden_sizes''' ) )
        self.parent.assertTrue(hasattr(lowercase__ ,'''num_attention_heads''' ) )
class lowercase_ :
    """Fixture builder for the Levit model tests: stores hyper-parameters,
    produces configs and dummy inputs, and runs shape checks.

    NOTE(review): every parameter of ``__init__`` was obfuscated to
    ``lowercase__`` (duplicate names, a SyntaxError), every method to the
    same name ``SCREAMING_SNAKE_CASE`` (later defs shadow earlier ones) and
    every local to ``__lowercase`` while the bodies read the real names
    (``parent``, ``batch_size``, ``config`` ...); restore the original
    identifiers before executing.
    """

    def __init__( self : Optional[Any] ,lowercase__ : Any ,lowercase__ : Optional[int]=1_3 ,lowercase__ : Union[str, Any]=6_4 ,lowercase__ : List[str]=3 ,lowercase__ : Any=3 ,lowercase__ : Any=2 ,lowercase__ : Optional[int]=1 ,lowercase__ : Tuple=1_6 ,lowercase__ : List[str]=[1_2_8, 2_5_6, 3_8_4] ,lowercase__ : List[str]=[4, 6, 8] ,lowercase__ : Tuple=[2, 3, 4] ,lowercase__ : str=[1_6, 1_6, 1_6] ,lowercase__ : Optional[int]=0 ,lowercase__ : List[Any]=[2, 2, 2] ,lowercase__ : List[str]=[2, 2, 2] ,lowercase__ : Optional[int]=0.0_2 ,lowercase__ : List[str]=True ,lowercase__ : Optional[Any]=True ,lowercase__ : Any=2 ,):
        # Test harness handle plus dummy-input hyper-parameters.
        __lowercase = parent
        __lowercase = batch_size
        __lowercase = image_size
        __lowercase = num_channels
        __lowercase = kernel_size
        __lowercase = stride
        __lowercase = padding
        __lowercase = hidden_sizes
        __lowercase = num_attention_heads
        __lowercase = depths
        __lowercase = key_dim
        __lowercase = drop_path_rate
        __lowercase = patch_size
        __lowercase = attention_ratio
        __lowercase = mlp_ratio
        __lowercase = initializer_range
        # Down-sampling ("Subsample") ops between the three Levit stages.
        __lowercase = [
            ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        __lowercase = is_training
        __lowercase = use_labels
        __lowercase = num_labels
        __lowercase = initializer_range

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # Random pixel values (plus labels when ``use_labels``) and a config.
        __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowercase = None
        if self.use_labels:
            __lowercase = ids_tensor([self.batch_size] ,self.num_labels )
        __lowercase = self.get_config()
        return config, pixel_values, labels

    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        # Assemble a LevitConfig from the stored hyper-parameters.
        return LevitConfig(
            image_size=self.image_size ,num_channels=self.num_channels ,kernel_size=self.kernel_size ,stride=self.stride ,padding=self.padding ,patch_size=self.patch_size ,hidden_sizes=self.hidden_sizes ,num_attention_heads=self.num_attention_heads ,depths=self.depths ,key_dim=self.key_dim ,drop_path_rate=self.drop_path_rate ,mlp_ratio=self.mlp_ratio ,attention_ratio=self.attention_ratio ,initializer_range=self.initializer_range ,down_ops=self.down_ops ,)

    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : str ,lowercase__ : Any ):
        # Bare LevitModel forward: the sequence length of the output must
        # correspond to four stride-s conv reductions of the input size.
        __lowercase = LevitModel(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __lowercase = model(lowercase__ )
        __lowercase = (self.image_size, self.image_size)
        __lowercase , __lowercase = image_size[0], image_size[1]
        for _ in range(4 ):
            # Conv output size formula: floor((in + 2p - k) / s) + 1.
            __lowercase = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
            __lowercase = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) ,)

    def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : str ):
        # Classification head: logits must be (batch, num_labels).
        __lowercase = self.num_labels
        __lowercase = LevitForImageClassification(lowercase__ )
        model.to(lowercase__ )
        model.eval()
        __lowercase = model(lowercase__ ,labels=lowercase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        # Common entry point used by ModelTesterMixin.
        __lowercase = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase = config_and_inputs
        __lowercase = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """Model-level test-suite for Levit (ModelTesterMixin + PipelineTesterMixin).

    NOTE(review): obfuscation collapsed locals to ``__lowercase`` while later
    statements read the real names (``model``, ``config``, ``inputs`` ...),
    gave every test method and every class attribute the same name
    (``SCREAMING_SNAKE_CASE``, so later definitions shadow earlier ones);
    restore the original identifiers before executing this suite.
    """

    # all_model_classes / pipeline mapping / feature switches (names lost).
    SCREAMING_SNAKE_CASE : Optional[Any] = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    SCREAMING_SNAKE_CASE : Union[str, Any] = (
        {
            'feature-extraction': LevitModel,
            'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE : int = False
    SCREAMING_SNAKE_CASE : Union[str, Any] = False
    SCREAMING_SNAKE_CASE : Tuple = False
    SCREAMING_SNAKE_CASE : Optional[int] = False
    SCREAMING_SNAKE_CASE : Tuple = False

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        # setUp: shared model tester plus a text-free config tester.
        __lowercase = LevitModelTester(self )
        __lowercase = ConfigTester(self ,config_class=lowercase__ ,has_text_modality=lowercase__ ,hidden_size=3_7 )

    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        # Standard config round-trip / serialisation checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def SCREAMING_SNAKE_CASE ( self : Any ):
        # create_and_test_config_common_properties: intentionally a no-op.
        return

    @unittest.skip(reason='''Levit does not use inputs_embeds''' )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        pass

    @unittest.skip(reason='''Levit does not support input and output embeddings''' )
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        pass

    @unittest.skip(reason='''Levit does not output attentions''' )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        pass

    def SCREAMING_SNAKE_CASE ( self : Any ):
        # forward() signature of every model class must start with pixel_values.
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase = model_class(lowercase__ )
            __lowercase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowercase = [*signature.parameters.keys()]
            __lowercase = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] ,lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # hidden_states: one entry per stage (+1); the first entry has shape
        # (h' * w', hidden_sizes[0]) after four conv reductions.
        def check_hidden_states_output(lowercase__ : str ,lowercase__ : Optional[Any] ,lowercase__ : Union[str, Any] ):
            __lowercase = model_class(lowercase__ )
            model.to(lowercase__ )
            model.eval()
            with torch.no_grad():
                __lowercase = model(**self._prepare_for_class(lowercase__ ,lowercase__ ) )
            __lowercase = outputs.hidden_states
            __lowercase = len(self.model_tester.depths ) + 1
            self.assertEqual(len(lowercase__ ) ,lowercase__ )
            __lowercase = (self.model_tester.image_size, self.model_tester.image_size)
            __lowercase , __lowercase = image_size[0], image_size[1]
            for _ in range(4 ):
                # Conv output size: floor((in + 2p - k)/s) + 1, applied 4x.
                __lowercase = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
                __lowercase = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) ,[
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ] ,)

        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase = True
            check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __lowercase = True
            check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ )

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        pass

    def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple=False ):
        # The teacher variant is inference-only, so strip labels for it.
        __lowercase = super()._prepare_for_class(lowercase__ ,lowercase__ ,return_labels=lowercase__ )
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        # Shape test for the bare model.
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        # Shape test for the classification head.
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase__ )

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # Training smoke test: loss.backward() must run for trainable heads.
        if not self.model_tester.is_training:
            return
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        __lowercase = True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(lowercase__ )
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            __lowercase = model_class(lowercase__ )
            model.to(lowercase__ )
            model.train()
            __lowercase = self._prepare_for_class(lowercase__ ,lowercase__ ,return_labels=lowercase__ )
            __lowercase = model(**lowercase__ ).loss
            loss.backward()

    def SCREAMING_SNAKE_CASE ( self : int ):
        # Same training smoke test with gradient checkpointing enabled.
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        __lowercase = False
        __lowercase = True
        for model_class in self.all_model_classes:
            if model_class in get_values(lowercase__ ) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            __lowercase = model_class(lowercase__ )
            model.gradient_checkpointing_enable()
            model.to(lowercase__ )
            model.train()
            __lowercase = self._prepare_for_class(lowercase__ ,lowercase__ ,return_labels=lowercase__ )
            __lowercase = model(**lowercase__ ).loss
            loss.backward()

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # Each supported problem_type must train without the target-size
        # broadcast warning (regression symptom, see transformers#11780).
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        __lowercase = [
            {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
            {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
            {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(lowercase__ ),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
                    __lowercase = problem_type['''title''']
                    __lowercase = problem_type['''num_labels''']
                    __lowercase = model_class(lowercase__ )
                    model.to(lowercase__ )
                    model.train()
                    __lowercase = self._prepare_for_class(lowercase__ ,lowercase__ ,return_labels=lowercase__ )
                    if problem_type["num_labels"] > 1:
                        __lowercase = inputs['''labels'''].unsqueeze(1 ).repeat(1 ,problem_type['''num_labels'''] )
                    __lowercase = inputs['''labels'''].to(problem_type['''dtype'''] )
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=lowercase__ ) as warning_list:
                        __lowercase = model(**lowercase__ ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                F"Something is going wrong in the regression problem: intercepted {w.message}" )
                    loss.backward()

    @slow
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        # Loading the first pretrained checkpoint must succeed.
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase = LevitModel.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
def _A ( ):
    """Load and return the standard COCO fixture image used by the slow
    integration tests.

    Returns:
        ``PIL.Image.Image``: the image opened from the repository fixture.
    """
    # Bug fix: the original bound the opened image to a throwaway local and
    # then returned the undefined name ``image`` (NameError). Return the
    # opened image directly instead.
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
@require_torch
@require_vision
class lowercase_ (unittest.TestCase ):
    """Slow integration test: run the pretrained Levit teacher model on the
    COCO fixture image and compare the logits against recorded values.

    NOTE(review): locals were collapsed to ``__lowercase`` while later lines
    read ``model`` / ``outputs`` etc.; restore the original identifiers
    before executing.
    """

    @cached_property
    def SCREAMING_SNAKE_CASE ( self : Any ):
        # Image processor matching the first pretrained checkpoint.
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

    @slow
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        # Load model + processor, preprocess the fixture image.
        __lowercase = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            lowercase__ )
        __lowercase = self.default_image_processor
        __lowercase = prepare_img()
        __lowercase = image_processor(images=lowercase__ ,return_tensors='''pt''' ).to(lowercase__ )
        # forward pass
        with torch.no_grad():
            __lowercase = model(**lowercase__ )
        # verify the logits
        __lowercase = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape ,lowercase__ )
        # Reference values recorded from a known-good run; atol guards float noise.
        __lowercase = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(lowercase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowercase__ ,atol=1e-4 ) )
| 624 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _A ( A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = int(np.ceil((x_end - xa) / step_size ) )
__lowercase = np.zeros((n + 1,) )
__lowercase = ya
__lowercase = xa
for k in range(A__ ):
__lowercase = y[k] + step_size * ode_func(A__ , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 624 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
__A : List[Any] = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class lowerCAmelCase__ ( _UpperCamelCase , unittest.TestCase ):
    """Tokenizer test-suite for ``BartphoTokenizer`` built on a tiny
    SentencePiece BPE fixture vocabulary.

    NOTE(review): obfuscation gave every method the same name ``snake_case``
    (later definitions shadow earlier ones) and collapsed locals to
    ``__lowercase`` while later statements read the real names
    (``vocab_tokens``, ``tokenizer``, ``tokens`` ...); restore the original
    identifiers before executing.
    """

    __UpperCAmelCase : List[Any] = BartphoTokenizer
    __UpperCAmelCase : int = False
    __UpperCAmelCase : str = True

    def snake_case ( self : Union[str, Any] ):
        # setUp: write a 5-token monolingual vocab file into the tmp dir and
        # save a tokenizer built from it for the mixin to reload.
        super().setUp()
        __lowercase : List[Any] = ["▁This", "▁is", "▁a", "▁t", "est"]
        __lowercase : Dict = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
        __lowercase : Dict = {"unk_token": "<unk>"}
        __lowercase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
        with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
            for token in vocab_tokens:
                fp.write(f'{token} {vocab_tokens[token]}\n' )
        __lowercase : Optional[Any] = BartphoTokenizer(lowercase__ , self.monolingual_vocab_file , **self.special_tokens_map )
        tokenizer.save_pretrained(self.tmpdirname )

    def snake_case ( self : Any , **lowercase__ : List[Any] ):
        # Factory used by the mixin to reload the saved tokenizer.
        kwargs.update(self.special_tokens_map )
        return BartphoTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )

    def snake_case ( self : str , lowercase__ : Any ):
        # Input/expected pair: out-of-vocab pieces decode to <unk>.
        __lowercase : Any = "This is a là test"
        __lowercase : Any = "This is a<unk><unk> test"
        return input_text, output_text

    def snake_case ( self : Any ):
        # End-to-end: tokenize and map to ids against the tiny vocab.
        __lowercase : List[Any] = BartphoTokenizer(lowercase__ , self.monolingual_vocab_file , **self.special_tokens_map )
        __lowercase : int = "This is a là test"
        __lowercase : Union[str, Any] = "▁This ▁is ▁a ▁l à ▁t est".split()
        __lowercase : str = tokenizer.tokenize(lowercase__ )
        self.assertListEqual(lowercase__ , lowercase__ )
        __lowercase : str = tokens + [tokenizer.unk_token]
        # Ids 3 are the <unk> fallback for pieces missing from the vocab.
        __lowercase : List[str] = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
| 575 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the LiLT package: submodule names are resolved
# only on first access so importing the package stays cheap.
_lowerCAmelCase : List[Any] = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    # Modeling symbols are only registered when torch is installed.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase : int = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    # NOTE(review): the import-structure dict above was obfuscated to
    # ``_lowerCAmelCase`` but is passed here as ``_import_structure``
    # (undefined) -- presumably they were the same name originally; confirm
    # before shipping.
    _lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 289 | 0 |
def lowerCAmelCase__ ( UpperCamelCase_: list[list[int | float]] ) -> int:
    """Compute the rank of a matrix via in-place Gaussian elimination.

    Args:
        UpperCamelCase_: rectangular matrix of numbers. It is MODIFIED in
            place while the rank is being computed.

    Returns:
        The rank of the matrix.
    """
    # Bug fix: the original collapsed all locals to ``A__`` while the body
    # read ``matrix``/``rank``/``row``/``reduce``/``multiplier`` (NameError);
    # names restored from the body's own reads.
    matrix = UpperCamelCase_
    rows = len(matrix )
    columns = len(matrix[0] )
    rank = min(rows , columns )
    for row in range(rank ):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows ):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row , columns ):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1 , rows ):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                # No pivot found: this column contributes nothing to the rank.
                rank -= 1
                for i in range(rows ):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            # NOTE(review): reassigning a ``for`` loop variable has no effect
            # in Python; kept verbatim from the upstream algorithm.
            row -= 1
    return rank


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 526 |
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
_lowercase = logging.get_logger(__name__)
_lowercase = "T5Config"
def lowerCAmelCase__ ( input_ids: jnp.array , pad_token_id: int , decoder_start_token_id: int ) -> jnp.ndarray:
    """Shift ``input_ids`` one token to the right for decoder teacher forcing.

    Args:
        input_ids: (batch, seq_len) integer array of token ids.
        pad_token_id: id used to replace -100 label-ignore markers.
        decoder_start_token_id: id placed in the first position of every row.

    Returns:
        Array of the same shape with tokens shifted right by one.
    """
    # Bug fix: the original declared all three parameters with the same name
    # ``UpperCamelCase_`` (a duplicate-name SyntaxError) while the body read
    # ``input_ids``/``shifted_input_ids``; names restored from those reads.
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    # Replace any -100 label-ignore markers with the pad token.
    shifted_input_ids = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
class _UpperCAmelCase ( A__ ):
    # NOTE(review): all three classes in this group were obfuscated to the
    # same name ``_UpperCAmelCase`` (later definitions shadow earlier ones)
    # and both class attributes to ``UpperCamelCase__`` (the second shadows
    # the first); presumably these are thin MT5 wrappers over the Flax T5
    # bases imported above, differing only in the base class ``A__`` --
    # confirm against the un-obfuscated source.
    # Model-type tag, then the config class used when loading weights.
    UpperCamelCase__ = '''mt5'''
    UpperCamelCase__ = MTaConfig


class _UpperCAmelCase ( A__ ):
    UpperCamelCase__ = '''mt5'''
    UpperCamelCase__ = MTaConfig


class _UpperCAmelCase ( A__ ):
    UpperCamelCase__ = '''mt5'''
    UpperCamelCase__ = MTaConfig
| 526 | 1 |
# Micro-benchmark: run a BERT-style ONNX model ("model.onnx") through
# onnxruntime and report the average inference latency.
import os
import time
import numpy as np
import onnxruntime as ort

# NOTE(review): the three "1"/"0"/"1" assignments below were presumably
# ``os.environ[...]`` thread/provider knobs before obfuscation collapsed
# every assignment target to ``_SCREAMING_SNAKE_CASE`` -- later statements
# read the real names (``sess_opt``, ``sess``, ``batch`` ...); restore them
# before running.
_SCREAMING_SNAKE_CASE = "1"
_SCREAMING_SNAKE_CASE = "0"
_SCREAMING_SNAKE_CASE = "1"
# Session options with all graph optimisations disabled.
_SCREAMING_SNAKE_CASE = ort.SessionOptions()
_SCREAMING_SNAKE_CASE = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
# Prefer TensorRT, fall back to CUDA.
_SCREAMING_SNAKE_CASE = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
_SCREAMING_SNAKE_CASE = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
_SCREAMING_SNAKE_CASE = ort.RunOptions()

# Fixed benchmark shape: sequence length 128, batch 1.
_SCREAMING_SNAKE_CASE = 1_28
_SCREAMING_SNAKE_CASE = 1
# NOTE(review): ``np.intaa`` is the obfuscated spelling of what is
# presumably ``np.int64`` -- confirm.
_SCREAMING_SNAKE_CASE = np.ones((batch, sequence), dtype=np.intaa)
_SCREAMING_SNAKE_CASE = np.ones((batch, sequence), dtype=np.intaa)
_SCREAMING_SNAKE_CASE = np.ones((batch, sequence), dtype=np.intaa)

# One warm-up run so session initialisation is not timed.
print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)
print("Start inference...")
_SCREAMING_SNAKE_CASE = time.time()
_SCREAMING_SNAKE_CASE = 20_00
_SCREAMING_SNAKE_CASE = {}
for iter in range(max_iters):
    _SCREAMING_SNAKE_CASE = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
# Average wall-clock time per inference, in milliseconds.
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 10_00 / max_iters))
| 181 |
class SCREAMING_SNAKE_CASE_ :
    """Directed graph stored as an adjacency dict, with a recursive DFS.

    The original was broken by obfuscation: all five methods shared one name
    (so later definitions shadowed earlier ones), parameters were duplicated
    (a SyntaxError) and ``self.vertex`` was never assigned; method names are
    restored to the ones the ``__main__`` demo below actually calls.
    """

    def __init__(self):
        # vertex -> ordered list of adjacent vertices
        self.vertex = {}

    def print_graph(self):
        """Print the adjacency dict plus one ``u  ->  v -> w`` line per vertex."""
        print(self.vertex)
        for i in self.vertex:
            print(i, ' -> ', ' -> '.join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        """Add a directed edge ``from_vertex -> to_vertex``."""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        """Run a DFS over every component, printing vertices as visited."""
        # visited array of size n, all False initially
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        """Visit ``start_vertex`` then recurse into unvisited vertices.

        NOTE: following the upstream algorithm this iterates over *all*
        vertices of the graph, not only the neighbours of ``start_vertex``.
        """
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=' ')
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = SCREAMING_SNAKE_CASE_()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0  ->  1 -> 2
    # 1  ->  2
    # 2  ->  0 -> 3
    # 3  ->  3
    # DFS:
    #  0 1 2 3
| 181 | 1 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x2_0000 and cp <= 0x2_a6df) #
or (cp >= 0x2_a700 and cp <= 0x2_b73f) #
or (cp >= 0x2_b740 and cp <= 0x2_b81f) #
or (cp >= 0x2_b820 and cp <= 0x2_ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2_f800 and cp <= 0x2_fa1f) #
): #
return True
return False
def is_chinese(word):
    """Return 1 when every character of ``word`` is a CJK ideograph, else 0.

    Restored name: the original def was obfuscated to ``A_`` while the call
    sites in this file use ``is_chinese``, and its parameter was renamed away
    from the ``word`` the body iterates.
    """
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    # Empty strings fall through to 1, matching the original control flow.
    return 1
def get_chinese_word(tokens):
    """Collect the multi-character, all-CJK tokens from ``tokens``.

    Returns them as a de-duplicated list (set order, so unordered).

    Restored name: the original def was obfuscated to ``A_`` while the call
    site in ``prepare_ref`` uses ``get_chinese_word``.
    """
    word_set = set()

    for token in tokens:
        # Only tokens longer than one char can be whole Chinese words; the
        # short-circuit also skips the CJK check for 1-char tokens.
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : set() ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
_a = max([len(_lowerCAmelCase ) for w in chinese_word_set] )
_a = bert_tokens
_a , _a = 0, len(_lowerCAmelCase )
while start < end:
_a = True
if is_chinese(bert_word[start] ):
_a = min(end - start, _lowerCAmelCase )
for i in range(_lowerCAmelCase, 1, -1 ):
_a = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
_a = '''##''' + bert_word[j]
_a = start + i
_a = False
break
if single_word:
start += 1
return bert_word
def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : LTP, _lowerCAmelCase : BertTokenizer ):
"""simple docstring"""
_a = []
for i in range(0, len(_lowerCAmelCase ), 1_00 ):
_a = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=['''cws'''] ).cws
_a = [get_chinese_word(_lowerCAmelCase ) for r in res]
ltp_res.extend(_lowerCAmelCase )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
_a = []
for i in range(0, len(_lowerCAmelCase ), 1_00 ):
_a = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=_lowerCAmelCase, truncation=_lowerCAmelCase, max_length=5_12 )
bert_res.extend(res['''input_ids'''] )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
_a = []
for input_ids, chinese_word in zip(_lowerCAmelCase, _lowerCAmelCase ):
_a = []
for id in input_ids:
_a = bert_tokenizer._convert_id_to_token(_lowerCAmelCase )
input_tokens.append(_lowerCAmelCase )
_a = add_sub_symbol(_lowerCAmelCase, _lowerCAmelCase )
_a = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(_lowerCAmelCase ):
if token[:2] == "##":
_a = token[2:]
# save chinese tokens' pos
if len(_lowerCAmelCase ) == 1 and _is_chinese_char(ord(_lowerCAmelCase ) ):
ref_id.append(_lowerCAmelCase )
ref_ids.append(_lowerCAmelCase )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
return ref_ids
def main(args):
    """Read ``args.file_name``, compute whole-word refs, write JSON lines.

    Restored name: the original def was obfuscated to ``A_`` while the
    ``__main__`` guard calls ``main``; locals restored from the body's reads.
    """
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        # One JSON array of positions per input line.
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
__snake_case = parser.parse_args()
main(args) | 285 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels | 285 | 1 |
def __snake_case ( _lowerCAmelCase : Any ) -> int:
A_ : Optional[int] = abs(_lowercase )
A_ : Optional[Any] = 0
while n > 0:
res += n % 10
n //= 10
return res
def __snake_case ( _lowerCAmelCase : str ) -> Tuple:
A_ : List[Any] = abs(_lowercase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def __snake_case ( _lowerCAmelCase : int ) -> Any:
return sum(int(_lowercase ) for c in str(abs(_lowercase ) ) )
def __snake_case ( ) -> Optional[Any]:
    """Benchmark the digit-sum implementations with ``timeit`` and print results."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # Fix: the locals were all assigned to `A_`, leaving `call` and
        # `timing` undefined in the f-strings below.
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        # NOTE(review): these three names are undefined in this file — the
        # digit-sum helpers above were all renamed to `__snake_case` and
        # shadow one another; restore their original names before running.
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 454 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def _lowerCAmelCase (_lowercase ):
    """Element-wise rectified linear unit: clamp negative entries to zero."""
    return np.clip(_lowercase, 0, None)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 331 | 0 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
# Fix: the two module constants were assigned to `lowerCAmelCase_` while the
# code below reads `git_repo_path` and `REFERENCE_CODE`; restore the names
# actually used.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class _A ( unittest.TestCase ):
    """Tests for utils/check_copies.py (the # Copied from consistency checker).

    Review fixes applied: the corrupted method names (`__a`, which unittest
    never discovers and which are name-mangled inside the class) were
    restored to their conventional names; the argument placeholder `__A`
    (also mangled, hence NameError at runtime) was replaced by the real
    variables each call site needs; `black.TargetVersion.PYaa` was restored
    to the real `PY35` member; tuple results are unpacked again; and stray
    dataset-residue tokens at the end of the class were removed.
    """

    def setUp(self):
        """Create a scratch `src/transformers` tree with a copy of modeling_bert.py."""
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, 'models/bert/'))
        # Point the checker at the scratch tree instead of the real repo.
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, 'src/transformers/models/bert/modeling_bert.py'),
            os.path.join(self.transformer_dir, 'models/bert/modeling_bert.py'),
        )

    def tearDown(self):
        """Restore the real source path and delete the scratch tree."""
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        """Write `class_code` under `comment` to a temp module and run the checker.

        When `overwrite_result` is given, run with overwrite=True and expect
        the file to be rewritten to `overwrite_result`.
        """
        code = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, 'new_code.py')
        with open(fname, 'w', newline='\n') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, 'r') as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead')
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead',
            'BertLMPredictionHead',
            REFERENCE_CODE + '\n',
        )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead',
            'BertLMPredictionHead',
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel',
            'TestModelLMPredictionHead',
            re.sub('Bert', 'TestModel', REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""",
            f"""{long_class_name}LMPredictionHead""",
            re.sub('Bert', long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel',
            'TestModelLMPredictionHead',
            REFERENCE_CODE,
            overwrite_result=re.sub('Bert', 'TestModel', REFERENCE_CODE),
        )

    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
            " released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
            " (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
            " as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
            " Luong, Quoc V. Le, Christopher D. Manning."
        )
        localized_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        converted_localized_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
            " [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
            " Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
            " than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
            " Christopher D. Manning 发布。\n"
        )
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme['format_model_list']
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_localized_md_list)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_localized_md_list, localized_readme['format_model_list']
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)

        link_changed_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
        )
        link_unchanged_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
            " the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        converted_md_list_sample = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme['format_model_list']
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the I-BERT model package.
# Fix: the structure was assigned to `lowerCAmelCase_` while `_import_structure`
# (referenced on the last line) was never defined, the torch-only symbols were
# never attached to it, the `_LazyModule` result was discarded instead of
# replacing the module in `sys.modules`, and stray dataset-residue tokens were
# fused onto the final line.
_import_structure = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_ibert'] = [
        'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'IBertForMaskedLM',
        'IBertForMultipleChoice',
        'IBertForQuestionAnswering',
        'IBertForSequenceClassification',
        'IBertForTokenClassification',
        'IBertModel',
        'IBertPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
# Conversion factors from each supported energy unit to joules.
# Fix: the table was assigned to `A` while the function below reads
# `ENERGY_CONVERSION`; restored the name actually used.
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.05_585,
    "footpound": 1.355818,
}


def __A ( from_type: str, to_type: str, value: float) -> float:
    """Convert `value` from `from_type` units to `to_type` units of energy.

    Raises:
        ValueError: if either unit name is not in ENERGY_CONVERSION.
    """
    # Fix: the corrupted signature repeated the placeholder `a_` for all three
    # parameters (a SyntaxError) and the error message joined a float instead
    # of the table of valid unit names.
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()


# First import line of the following tokenizer section, restored from a line
# that had dataset-residue tokens fused into it.
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
# Fix: all four module constants were assigned to the placeholder `a` while
# the tokenizer class below reads `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_LYRIC_TOKENS_SIZES`; restore
# the names actually used.
logger = logging.get_logger(__name__)

# Vocabulary file names expected by the Jukebox tokenizer.
VOCAB_FILES_NAMES = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}

# Hosted vocabulary files per checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "artists_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
    },
    "genres_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
    },
    "lyrics_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
    },
}

# Maximum number of lyric tokens per checkpoint.
PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}
class _lowercase ( PreTrainedTokenizer ):
    """Jukebox tokenizer: maps (artist, genres, lyrics) triples to id lists.

    Review fixes applied: every method signature had the same placeholder
    parameter name repeated (a SyntaxError), every method was named `_a`
    (shadowing one another), locals/attributes were assigned to placeholders
    instead of `self.<name>`, the parent class placeholder was restored to the
    imported `PreTrainedTokenizer`, and stray dataset-residue tokens at the
    end of the class were removed. Names reconstructed from the surviving
    identifiers in each body.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        """Load the artist/genre/lyric vocabularies and build the decoders."""
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token, n_genres=n_genres, version=version, max_n_lyric_tokens=max_n_lyric_tokens, **kwargs
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)
        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)
        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = R"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(R"\-'", R"\-+'")
        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def vocab_size(self):
        """Total size of the three concatenated vocabularies."""
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        """Return the merged artist/genre/lyric vocabulary."""
        return dict(self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder)

    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        """Map artists, genres and lyric characters to ids; pad genres to n_genres with -1."""
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        # Only the first version's lyrics are tokenized character-by-character.
        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize(self, lyrics):
        """Split lyrics into single characters."""
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        """Normalize the triple, then character-tokenize the lyrics."""
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics

    def prepare_for_tokenization(self, artists, genres, lyrics, is_split_into_words=False):
        """Version-dependent normalization of artists/genres and cleanup of the lyrics."""
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(R"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(R"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics

    def _run_strip_accents(self, text):
        """Strip accents (combining marks) from `text`."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text):
        """Lowercase `text` and replace runs of non [a-zA-Z0-9.] characters with '_'."""
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(R"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text

    def convert_lyric_tokens_to_string(self, lyrics):
        """Join lyric tokens back into a single space-separated string."""
        return " ".join(lyrics)

    def convert_to_tensors(self, inputs, tensor_type=None, prepend_batch_axis=False):
        """Convert `inputs` to the framework tensor selected by `tensor_type`."""
        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.")
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length.")
        return inputs

    def __call__(self, artist, genres, lyrics="", return_tensors="pt"):
        """Tokenize a triple and return a BatchEncoding of per-version input_ids."""
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)
        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)
        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors)
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the three vocabulary JSON files into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"])
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))

        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"])
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))

        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"])
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))

        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        """Map artist/genre/lyric ids back to their tokens via the decoders."""
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a ( ProcessorMixin ):
    """ViLT processor: wraps a ViltImageProcessor and a BERT tokenizer.

    Review fixes applied: every method signature repeated the same placeholder
    parameter name (a SyntaxError), the ProcessorMixin class attributes and
    the parent class had placeholder names, and `FutureWarning` had been
    replaced by a placeholder in the deprecation warnings. Names reconstructed
    from the surviving identifiers and string literals.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """Accept the deprecated `feature_extractor` kwarg as an alias of `image_processor`."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ):
        """Tokenize `text` and merge in the image processor's pixel outputs."""
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
            return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Tokenizer input names followed by image-processor input names, de-duplicated."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        """Deprecated alias of `image_processor_class`."""
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias of `image_processor`."""
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning, )
        return self.image_processor
| 705 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
# Map of canonical TAPAS checkpoint names to their hosted config.json URLs.
# NOTE(review): nothing in this file reads `UpperCAmelCase_`; upstream this
# constant is named TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP — the identifier
# looks corrupted, confirm against upstream before relying on it.
UpperCAmelCase_ : List[Any] = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}
class a ( PretrainedConfig ):
    """Configuration for TAPAS models: BERT hyperparameters plus the
    table-QA fine-tuning and aggregation hyperparameters.

    Review fixes applied: the `__init__` signature repeated the same
    placeholder parameter name for all ~37 parameters (a SyntaxError), every
    attribute was assigned to a placeholder local instead of `self.<name>`,
    the parent placeholder was restored to the imported `PretrainedConfig`,
    and `model_type` got its conventional attribute name. Parameter names
    reconstructed from the attribute assignment order and default values.
    """

    model_type = "tapas"

    def __init__(
        self,
        vocab_size=3_0_5_2_2,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_0_2_4,
        type_vocab_sizes=[3, 2_5_6, 2_5_6, 2, 2_5_6, 2_5_6, 1_0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=6_4,
        max_num_columns=3_2,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            # Fix: normalize keys with int(k), not int(<placeholder>).
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 424 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger (transformers logging wrapper).
# NOTE(review): both constants below are bound to the same placeholder name
# `__lowerCamelCase`, so the logger is immediately shadowed by the map;
# upstream these are `logger` and XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP —
# confirm against upstream.
__lowerCamelCase : Dict = logging.get_logger(__name__)

# Map of canonical XLM-RoBERTa checkpoints to their hosted config.json URLs.
__lowerCamelCase : List[Any] = {
    """xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
    """xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
    """xlm-roberta-large-finetuned-conll02-dutch""": (
        """https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
    ),
    """xlm-roberta-large-finetuned-conll02-spanish""": (
        """https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
    ),
    """xlm-roberta-large-finetuned-conll03-english""": (
        """https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
    ),
    """xlm-roberta-large-finetuned-conll03-german""": (
        """https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
    ),
}
class A__ ( PretrainedConfig ):
    """Configuration for XLM-RoBERTa models (BERT-style hyperparameters).

    Review fixes applied: the `__init__` signature repeated the placeholder
    `A_` for every parameter (a SyntaxError), the attributes were assigned to
    a placeholder local instead of `self.<name>`, the parent placeholder was
    restored to the imported `PretrainedConfig`, and `model_type` got its
    conventional attribute name. Parameter names reconstructed from the
    attribute assignment order and default values.
    """

    model_type = 'xlm-roberta'

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A__ ( OnnxConfig ):
    """ONNX export configuration for XLM-RoBERTa.

    NOTE(review): this second `class A__` shadows the config class of the same
    name defined just above — upstream these are XLMRobertaConfig and
    XLMRobertaOnnxConfig; confirm before renaming.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis mapping for the exported model inputs.

        Review fixes: the axis dict was assigned to a placeholder while the
        return statement read undefined `dynamic_axis`; the property name
        (mangled `__UpperCamelCase`) was restored to the `inputs` contract of
        the imported `OnnxConfig` parent.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 629 |
# Largest prime below 2**16, per the Adler-32 specification (RFC 1950).
# The original bound this value to a different name than the one the
# function reads, raising NameError on first call.
MOD_ADLER = 65521


def A_(plain_text: str) -> int:
    """Compute the Adler-32 checksum of *plain_text*.

    Characters are folded in via their Unicode code points; the running
    sums ``a`` (initialised to 1) and ``b`` are combined as ``(b << 16) | a``.

    >>> A_("")
    1
    """
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
| 629 | 1 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    """Builds small RegNet configs and inputs for the Flax model tests.

    The original gave every parameter and every method the same obfuscated
    name (duplicate-arg SyntaxError; later methods silently overrode
    earlier ones). Names are restored to match the calls made by the
    sibling test class (``prepare_config_and_inputs`` etc.) and the
    ``FlaxRegNetModelTester(self)`` reference in its ``setUp``.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],  # noqa: B006 — treated as read-only
        depths=[1, 1, 2, 1],  # noqa: B006 — treated as read-only
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per entry in hidden_sizes; read by test_hidden_states_output.
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        """Return a (config, pixel_values) pair with random image data."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w): RegNet downsamples spatially by 32.
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Model tests for the Flax RegNet implementations.

    Restores the canonical mixin base (the original named an undefined
    ``_a``), the class-attribute names read by ``FlaxModelTesterMixin``,
    and distinct test method names (the original defined every method
    under one name, so only the last survived).
    """

    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # RegNet configs have no common text properties to check.
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            # Embedding output plus one entry per stage.
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                # JIT must not change the number or shapes of the outputs.
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    """Load the COCO fixture image used by the integration test.

    Renamed from the obfuscated ``_UpperCAmelCase``: the integration test
    below calls ``prepare_img()``.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against the pretrained facebook/regnet-y-040.

    The original named both members identically, so the cached property was
    shadowed and ``self.default_image_processor`` raised AttributeError.
    """

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 196 |
from math import loga
def _UpperCAmelCase (UpperCamelCase_ : int ):
'''simple docstring'''
if a < 0:
raise ValueError("""Input value must be a positive integer""" )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise TypeError("""Input value must be a 'int' type""" )
return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 196 | 1 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
a_ : Optional[Any] = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
a_ : List[str] = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
a_ : Any = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
a_ : int = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
a_ : List[Any] = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _snake_case(datasets.Metric):
    """Executes model-generated code candidates and reports pass@k.

    The two methods must be named ``_info``/``_compute`` — the
    ``datasets.Metric`` base class dispatches to them; the original gave
    both the same obfuscated name, so one shadowed the other.
    """

    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Return ``(pass_at_k, results)`` for the given candidates/tests.

        Each candidate program is concatenated with its test case and run
        in a worker thread via ``check_correctness``; results are grouped
        per task before the pass@k estimate is computed.
        """
        # Guard: refuse to execute untrusted code unless explicitly enabled.
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()  # per-task counter of submitted candidates
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()  # restore submission order via completion_id
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimate pass@k of each problem; return an array of estimates.

    *num_samples* may be a single int (shared by all problems) or a
    sequence aligned with *num_correct*. Renamed from the obfuscated
    ``lowerCamelCase__`` to match the call in ``_compute`` above.
    """

    def estimator(n: int, c: int, k: int) -> float:
        """Calculate 1 - comb(n - c, k) / comb(n, k) numerically stably."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
| 73 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _snake_case(unittest.TestCase, ToolTesterMixin):
    """Tests the local and remote `text-classification` tool.

    Restores ``setUp`` semantics (the original assigned the tools to
    locals instead of ``self.tool``/``self.remote_tool``) and the mixin
    base ``ToolTesterMixin`` imported above.
    """

    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 73 | 1 |
from __future__ import annotations

# A path is a list of (y, x) grid coordinates. The original bound the
# alias, grid and delta to the same throwaway name, leaving `grid` and
# `delta` undefined for the search classes below.
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    """A search node: position, goal, path cost and Manhattan heuristic.

    Renamed from the obfuscated ``A__``: ``GreedyBestFirst`` below
    instantiates ``Node(...)``. The original also assigned every instance
    attribute to a throwaway local, leaving the node empty.
    """

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)  # (row, column) tuple used for goal tests
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Manhattan distance from this node to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other):
        # Ordering by f_cost lets a plain list.sort() act best-first.
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    """Greedy best-first search over the module-level ``grid``.

    Renamed from the obfuscated ``A__`` to match the ``GreedyBestFirst``
    instantiation in the ``__main__`` block; method and attribute names
    are restored (the original collapsed all three methods onto one name
    and stored state in locals instead of on ``self``).
    """

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        # Node takes (pos_x, pos_y, ...) so the (y, x) tuples are swapped.
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self):
        """Run the search; return the path found, ``[start.pos]`` on
        failure, or ``None`` when the loop exits after reaching the goal."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__ (smallest f_cost first).
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node):
        """Return in-bounds, unblocked neighbour nodes of *parent*."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node):
        """Walk parent links back to the start and return the path in order."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    # Run the demo search from the top-left corner to the bottom-right
    # corner and mark the discovered path with 2s on the grid.
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
| 151 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy-import structure: symbol lists keyed by submodule. The original
# bound this dict (and the conditional additions) to throwaway names,
# so the _LazyModule call below received an undefined variable and the
# module was never installed in sys.modules.
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 151 | 1 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow LXMERT checkpoint into a PyTorch state dict.

    Renamed from ``A__``: the ``__main__`` block below calls
    ``convert_tf_checkpoint_to_pytorch``. The original also reused one
    parameter name three times (a SyntaxError).

    Args:
        tf_checkpoint_path: path of the TF checkpoint to load.
        config_file: JSON config describing the model architecture.
        pytorch_dump_path: output path for the saved state dict.
    """
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI wrapper: the original assigned both the parser and the parsed
    # args to the same obfuscated name, so `parser` was undefined when
    # `parse_args` was called.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowerCAmelCase = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def tpu_command_parser(subparsers=None):
    """Build the ``accelerate tpu-config`` argument parser.

    When *subparsers* is given, register as a sub-command and wire its
    default handler to ``tpu_command_launcher``; otherwise build a
    standalone parser.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    """Assemble and run the ``gcloud ... tpu-vm ssh`` setup command.

    Fills unspecified CLI options from the accelerate config file, builds
    the shell command string, and either prints it (``--debug``) or runs
    it via subprocess.
    """
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    """Standalone entry point for `accelerate tpu-config`.

    Renamed from a third duplicate of the obfuscated ``_lowerCamelCase``,
    which was silently overriding the launcher defined above.
    """
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
| 230 | 0 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
# NOTE(review): presumably the JSON indent level used when dumping vocab/config
# files later in this conversion script — confirm against the remainder of the
# file (not visible in this chunk) before renaming.
a_ : Tuple = 2
class lowerCamelCase__:
    """A fairseq-style mapping from symbols to consecutive integer ids.

    Restores distinct keyword-only parameter names (the original reused
    one name four times — a SyntaxError), instance-attribute assignments
    (the original stored state in locals), and the method names that the
    class itself calls (``add_symbol``, ``_load_meta``, ``add_from_file``).
    """

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []  # id -> symbol
        self.count = []  # id -> accumulated count
        self.indices = {}  # symbol -> id
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        # Number of reserved (special) entries at the front of the table.
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        # Out-of-range ids map to the unknown token rather than raising.
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load a dictionary from a text file with one `<symbol> <count>` per line."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add *word* (accumulating its count); return its id."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        # The base dictionary format has no metadata header.
        return 0

    def add_from_file(self, f):
        """Load a pre-existing dictionary from a file path or file object."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    """Rewrite a fairseq BPE vocab so it matches the HF tokenizer convention.

    (1) strips the ``@@`` continuation marker from word-internal pieces and
    (2) appends ``</w>`` to word-final pieces, then restores the four special
    tokens under their original names.  Fixes the obfuscated version, which
    substituted the wrong variable into ``re.sub`` and dropped the
    ``d2[k] = d[k]`` restore into a dead local assignment.
    """
    d2 = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v)
        for k, v in d.items()
    )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    """Convert an official fairseq BioGPT checkpoint dump into a HF model folder.

    Expects ``checkpoint.pt``, ``dict.txt`` and ``bpecodes`` inside
    *biogpt_checkpoint_path*; writes vocab, merges, config, tokenizer config
    and ``pytorch_model.bin`` into *pytorch_dump_folder_path*.  Restores the
    data flow that the obfuscation destroyed (every local had been renamed to
    the same identifier, leaving most names undefined at their use sites).
    """
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=2))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }
    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=2))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }
    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=2))

    # model
    model_state_dict = chkpt["model"]
    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    # fairseq prefixes weights with "decoder."; HF uses "biogpt." (except the LM head).
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    # CLI wrapper: parse the two required paths and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Largest per-device batch size that fits on a single GPU, and the batch
# size used for evaluation (the originals were both mangled to the same name).
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    """Build train/eval dataloaders for GLUE MRPC tokenized with *model_name*.

    Args:
        accelerator: the ``Accelerator`` (used to pick TPU-friendly padding).
        batch_size: per-device batch size for both splits.
        model_name: tokenizer checkpoint to load.

    Returns:
        ``(train_dataloader, eval_dataloader)``.

    The obfuscated original declared three parameters with the same name
    (a SyntaxError) and referenced undefined locals; this restores the
    intended data flow.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Fine-tune a sequence classifier on MRPC under Accelerate (+ DeepSpeed).

    Args:
        config: dict with ``lr``, ``num_epochs``, ``seed`` and ``batch_size``.
        args: parsed CLI namespace (``model_name_or_path``, ``output_dir``,
            ``performance_lower_bound``, ``num_epochs``).

    Tracks the best eval accuracy per epoch, optionally asserts it stays
    above ``args.performance_lower_bound``, and dumps the per-epoch metrics
    to ``all_results.json`` on the main process.
    """
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: DeepSpeed may own the optimizer via its config,
    # in which case a DummyOptim placeholder is required.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler (same DeepSpeed-vs-local split as the optimizer).
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    """Parse CLI flags and run the MRPC training loop."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece fixture used by the unit tests below.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

# Ids of the PLBart "base" language-code specials; the obfuscated original
# assigned all three values to the same name, leaving EN_CODE/PYTHON_CODE
# (referenced by the tests) undefined.
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test suite for PLBart, driven by the shared TokenizerTesterMixin.

    Restores the upstream structure: the obfuscated version used an undefined
    mixin name, collapsed three class attributes into one, and gave both test
    methods the same name (so only the second would ever run).
    """

    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None  # PLBart ships no fast (Rust) tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_base_tokenizer(self):
        """Tokenize/encode/decode round-trips with the 'base' language-code set."""
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",  # "9" is out of vocabulary
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",  # "é" is out of vocabulary
                ".",
            ],
        )

        # The last four ids of the vocab are the "base" special tokens.
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )

    def test_full_multi_tokenizer(self):
        """Same round-trips with the larger 'multi' language-code set (7 codes)."""
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        # The last seven ids of the vocab are the "multi" language codes.
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]
        )

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    """Integration tests against the released ``uclanlp/plbart-python-en_XX`` checkpoint.

    Restores distinct method names and the class-level fixtures that the
    obfuscation collapsed (the class previously shadowed the tokenizer unit
    test class by reusing its name, so one of the two suites never ran).
    """

    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    # Expected encoding of src_text[0]; ends with EOS (2) + the __python__ code.
    expected_src_tokens = [
        134,
        5452,
        33460,
        33441,
        33463,
        33465,
        33463,
        33449,
        988,
        20,
        33456,
        19,
        33456,
        771,
        39,
        4258,
        889,
        3318,
        33441,
        33463,
        33465,
        33463,
        33449,
        2471,
        2,
        PYTHON_CODE,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)

    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(PYTHON_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)  # EOS
        self.assertEqual(ids[-1], PYTHON_CODE)  # source language code comes last
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
| 119 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    """Builds tiny AlbertConfig + random inputs for the Flax Albert test suite.

    Restores distinct method names: the obfuscated version named both methods
    ``__a``, so ``prepare_config_and_inputs`` was shadowed and the call to it
    from ``prepare_config_and_inputs_for_common`` could never resolve.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) with random data."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Repackage the inputs as the dict shape expected by FlaxModelTesterMixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common Flax model tests applied to every Albert head.

    Restores the mixin base class (the obfuscated version inherited from an
    undefined name) and drops the duplicated ``FlaxAlbertForQuestionAnswering``
    entry that made the same head run twice.
    """

    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Smoke test: every head must load from the hub and run a forward pass.
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    """Numerical regression test against the released albert-base-v2 weights."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        # Reference slice recorded from the original PyTorch implementation.
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 489 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_lowercase : Union[str, Any] = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    """Configuration for UperNet semantic-segmentation models.

    Stores the backbone config plus the decode/auxiliary head hyperparameters.
    Restores the base class (the obfuscated version inherited from an undefined
    name instead of the imported ``PretrainedConfig``) and the mangled
    ``isinstance(..., dict)`` check and attribute assignments.
    """

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # Allow passing the backbone config as a plain dict (e.g. from JSON).
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 625 |
"""Dutch national flag sort: sort a sequence of 0s, 1s and 2s in one pass."""
RED = 0  # The first color of the flag.
WHITE = 1  # The second color of the flag.
BLUE = 2  # The third color of the flag.
colors = (RED, WHITE, BLUE)
def dutch_national_flag_sort(sequence: list) -> list:
    """Sort *sequence* (containing only 0, 1 and 2) in place and return it.

    Implements Dijkstra's Dutch national flag algorithm: a single pass with
    three pointers, O(n) time and O(1) extra space.  Raises ``ValueError`` on
    any other element.  Fixes the obfuscated original, whose swap statements
    assigned to throwaway locals and therefore never reordered the list.

    >>> dutch_national_flag_sort([2, 1, 0, 0, 1, 2])
    [0, 0, 1, 1, 2, 2]
    >>> dutch_national_flag_sort([])
    []
    """
    # Local copy of the flag colors so this function is self-contained.
    colors = (0, 1, 2)
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            # 0 goes to the front; both low and mid regions grow.
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            # 2 goes to the back; re-examine the element swapped into mid.
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            raise ValueError(f"The elements inside the sequence must contains only {colors} values")
    return sequence
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Read a comma-separated list of 0/1/2 values from stdin and sort it.
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
| 625 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
    [
        {
            """framework""": """pytorch""",
            """script""": """run_glue_model_parallelism.py""",
            """model_name_or_path""": """roberta-large""",
            """instance_type""": """ml.p3dn.24xlarge""",
            """results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
        },
        {
            """framework""": """pytorch""",
            """script""": """run_glue.py""",
            """model_name_or_path""": """roberta-large""",
            """instance_type""": """ml.p3dn.24xlarge""",
            """results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
        },
    ] )
class lowerCAmelCase__ ( unittest.TestCase ):
    '''simple docstring'''

    # SageMaker integration test for model-parallel GLUE fine-tuning; runs only
    # when TEST_SAGEMAKER=True.  NOTE(review): this block was machine-renamed.
    # Several names below (`_a`, `instance_count`, `name_extension`, `job_name`,
    # `result_metrics_df`, `train_runtime`, `eval_accuracy`, `eval_loss`,
    # `estimator`, `smp_options`, `mpi_options`) are referenced without being
    # defined in scope -- they look like remnants of the original local and
    # parameter names.  Confirm against the upstream test before executing.

    def __UpperCamelCase ( self ):
        '''simple docstring'''
        # Copy the example training script into the SageMaker test workspace.
        if self.framework == "pytorch":
            subprocess.run(
                f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='''utf-8''' , check=_a , )  # NOTE(review): `_a` is undefined here
        assert hasattr(self , '''env''' )

    def __UpperCamelCase ( self , lowercase__ ):
        '''simple docstring'''
        # Distributed-training configuration (MPI + SageMaker model parallel).
        # NOTE(review): every result is bound to the same throwaway name `__A`,
        # so only the last assignment survives; the call below then references
        # names that no longer exist.
        __A ={
            '''enabled''': True,
            '''processes_per_host''': 8,
        }
        __A ={
            '''enabled''': True,
            '''parameters''': {
                '''microbatches''': 4,
                '''placement_strategy''': '''spread''',
                '''pipeline''': '''interleaved''',
                '''optimize''': '''speed''',
                '''partitions''': 4,
                '''ddp''': True,
            },
        }
        __A ={'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
        __A ='''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=_a , instance_type=self.instance_type , debugger_hook_config=_a , hyperparameters={
            **self.env.hyperparameters,
            '''model_name_or_path''': self.model_name_or_path,
            '''max_steps''': 5_0_0,
        } , metric_definitions=self.env.metric_definitions , distribution=_a , py_version='''py36''' , )

    def __UpperCamelCase ( self , lowercase__ ):
        '''simple docstring'''
        # Export the training job's metric history as CSV next to the workspace.
        TrainingJobAnalytics(_a ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )

    @parameterized.expand([(1,)] )
    def __UpperCamelCase ( self , lowercase__ ):
        '''simple docstring'''
        __A =self.create_estimator(_a )
        # run training
        estimator.fit()
        # result dataframe
        __A =TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        __A =list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        __A =list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        __A =(
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _a )
| 184 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __magic_name__ (unittest.TestCase ):
    '''simple docstring'''

    # Round-trip and input-shape tests for TvltProcessor (image processor +
    # feature extractor).  NOTE(review): this block was machine-renamed --
    # locals are repeatedly bound to `snake_case__` and then referenced as
    # `_a`, and attributes such as `self.checkpoint` / `self.tmpdirname` are
    # read but never assigned.  Confirm names against the upstream
    # TvltProcessor test before running.

    def SCREAMING_SNAKE_CASE__ ( self:str ):
        # Intended as setUp: remember the checkpoint id and a temp directory.
        snake_case__ = '''ZinengTang/tvlt-base'''
        snake_case__ = tempfile.mkdtemp()

    def SCREAMING_SNAKE_CASE__ ( self:Dict , **_a:List[Any] ):
        # Build an image processor from the checkpoint (kwargs forwarded).
        return TvltImageProcessor.from_pretrained(self.checkpoint , **_a )

    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , **_a:Tuple ):
        # Build a feature extractor from the checkpoint (kwargs forwarded).
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_a )

    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        # Intended as tearDown: remove the temp directory.
        shutil.rmtree(self.tmpdirname )

    def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
        # save_pretrained / from_pretrained round trip should preserve types.
        snake_case__ = self.get_image_processor()
        snake_case__ = self.get_feature_extractor()
        snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
        processor.save_pretrained(self.tmpdirname )
        snake_case__ = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , _a )
        self.assertIsInstance(processor.image_processor , _a )

    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        # Processor(audio=...) should match the bare feature extractor output.
        snake_case__ = self.get_image_processor()
        snake_case__ = self.get_feature_extractor()
        snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
        snake_case__ = np.ones([1_20_00] )
        snake_case__ = feature_extractor(_a , return_tensors='''np''' )
        snake_case__ = processor(audio=_a , return_tensors='''np''' )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        # Processor(images=...) should match the bare image processor output.
        snake_case__ = self.get_image_processor()
        snake_case__ = self.get_feature_extractor()
        snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
        snake_case__ = np.ones([3, 2_24, 2_24] )
        snake_case__ = image_processor(_a , return_tensors='''np''' )
        snake_case__ = processor(images=_a , return_tensors='''np''' )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        # Joint audio+image call returns the four expected keys; empty call raises.
        snake_case__ = self.get_image_processor()
        snake_case__ = self.get_feature_extractor()
        snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
        snake_case__ = np.ones([1_20_00] )
        snake_case__ = np.ones([3, 2_24, 2_24] )
        snake_case__ = processor(audio=_a , images=_a )
        self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
        # test if it raises when no input is passed
        with pytest.raises(_a ):
            processor()

    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        # model_input_names is the concatenation of both components' names.
        snake_case__ = self.get_image_processor()
        snake_case__ = self.get_feature_extractor()
        snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
        self.assertListEqual(
            processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 33 | 0 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10  # window width under which the searches fall back to a linear scan
lowercase_: int = precision  # preserve the generated alias for backward compatibility
def A__ ( left : int , right : int , array : list[int] , target : int ) -> int:
    """Linearly scan ``array[left:right]`` for ``target``.

    Returns the first matching index, or -1 when ``target`` is absent.
    Note the half-open interval: ``right`` itself is not examined.
    """
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


lin_search = A__  # restore the name the ternary-search helpers below were written against
def A__ ( array : list[int] , target : int , precision : int = 10 ) -> int:
    """Iterative ternary search for ``target`` in the sorted ``array``.

    Once the active window is narrower than ``precision`` (module docs
    recommend >= 10) the search falls back to a linear probe.  Returns the
    index of ``target`` or -1 when it is absent.  ``precision`` is now a
    keyword argument (default matches the module constant) so the function
    no longer depends on a module global.
    """

    def _scan(lo: int, hi: int) -> int:
        # Plain left-to-right probe of array[lo:hi] for small windows.
        for index in range(lo, hi):
            if array[index] == target:
                return index
        return -1

    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return _scan(left, right)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


ite_ternary_search = A__  # restore the public name referenced by the __main__ block
def A__ ( left : int , right : int , array : list[int] , target : int , precision : int = 10 ) -> int:
    """Recursive ternary search for ``target`` in the sorted ``array`` between
    ``left`` and ``right``.

    Windows narrower than ``precision`` are scanned linearly (half-open, so
    ``array[right]`` itself is not probed by the fallback, mirroring the
    iterative helper).  Returns the index of ``target`` or -1.  ``precision``
    is a keyword argument so the function no longer depends on a module global.
    """
    if left < right:
        if right - left < precision:
            # Narrow window: fall back to a linear probe of array[left:right].
            for index in range(left, right):
                if array[index] == target:
                    return index
            return -1
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return A__(left, one_third - 1, array, target, precision)
        elif array[two_third] < target:
            return A__(two_third + 1, right, array, target, precision)
        else:
            return A__(one_third + 1, two_third - 1, array, target, precision)
    else:
        return -1


rec_ternary_search = A__  # restore the public name referenced by the __main__ block
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase_ : str = input('Enter numbers separated by comma:\n').strip()
lowercase_ : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
lowercase_ : List[Any] = int(input('Enter the number to be found in the list:\n').strip())
lowercase_ : Tuple = ite_ternary_search(collection, target)
lowercase_ : Any = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f'''Iterative search: {target} found at positions: {resulta}''')
print(f'''Recursive search: {target} found at positions: {resulta}''')
else:
print('Not found')
| 107 | import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCamelCase ( unittest.TestCase ):
    """Holds the hyper-parameters used by the image-processor tests below.

    The generated signature reused the single name ``lowerCAmelCase`` for
    every parameter (a SyntaxError) and bound the attributes to throwaway
    locals; parameter and attribute names are recovered from the right-hand
    sides of the original assignments and from ``prepare_image_processor_dict``.
    """

    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ) -> Any:
        # Default target size when the caller does not supply one.
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def UpperCamelCase_ ( self ) -> Optional[int]:
        # Kwargs for constructing the image processor under test.
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):

    # LayoutLMv3 image-processor tests: defaults, PIL/numpy/torch batching,
    # and a Tesseract OCR integration check.  NOTE(review): this block was
    # machine-renamed.  The base class `UpperCamelCase_`, the helper
    # `LayoutLMvaImageProcessingTester`, and the references `lowerCAmelCase`,
    # `image_processor`, `image_inputs`, `image_processing`, `encoding`,
    # `encoded_images`, `ds` below are not defined in this scope -- they look
    # like remnants of the original names.  Locals are repeatedly bound to the
    # throwaway `SCREAMING_SNAKE_CASE__`.  Confirm against the upstream test
    # before running.

    __a = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def UpperCamelCase_ ( self ) -> Tuple:
        # Intended as setUp: build the hyper-parameter holder.
        SCREAMING_SNAKE_CASE__: List[Any]= LayoutLMvaImageProcessingTester(self )

    @property
    def UpperCamelCase_ ( self ) -> Tuple:
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        # The processor exposes the three configuration attributes.
        SCREAMING_SNAKE_CASE__: Tuple= self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCAmelCase , '''do_resize''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''size''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''apply_ocr''' ) )

    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # from_dict honours the provided size and the int shorthand.
        SCREAMING_SNAKE_CASE__: Tuple= self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )

    def UpperCamelCase_ ( self ) -> Any:
        pass

    def UpperCamelCase_ ( self ) -> List[str]:
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__: int= self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        SCREAMING_SNAKE_CASE__: Optional[int]= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase , Image.Image )
        # Test not batched input
        SCREAMING_SNAKE_CASE__: str= image_processing(image_inputs[0] , return_tensors='''pt''' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        self.assertIsInstance(encoding.words , lowerCAmelCase )
        self.assertIsInstance(encoding.boxes , lowerCAmelCase )
        # Test batched
        SCREAMING_SNAKE_CASE__: Optional[Any]= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

    def UpperCamelCase_ ( self ) -> Dict:
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__: Tuple= self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        SCREAMING_SNAKE_CASE__: Dict= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase , np.ndarray )
        # Test not batched input
        SCREAMING_SNAKE_CASE__: Dict= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE__: Union[str, Any]= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

    def UpperCamelCase_ ( self ) -> str:
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__: Tuple= self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE__: int= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase , torch.Tensor )
        # Test not batched input
        SCREAMING_SNAKE_CASE__: Optional[int]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE__: Any= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # with apply_OCR = True
        SCREAMING_SNAKE_CASE__: int= LayoutLMvaImageProcessor()
        from datasets import load_dataset
        SCREAMING_SNAKE_CASE__: int= load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
        SCREAMING_SNAKE_CASE__: str= Image.open(ds[0]['''file'''] ).convert('''RGB''' )
        SCREAMING_SNAKE_CASE__: str= image_processing(lowerCAmelCase , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        SCREAMING_SNAKE_CASE__: Dict= [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', 
        '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
        SCREAMING_SNAKE_CASE__: List[Any]= [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 
        600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , lowerCAmelCase )
        self.assertListEqual(encoding.boxes , lowerCAmelCase )
        # with apply_OCR = False
        SCREAMING_SNAKE_CASE__: int= LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[Any]= image_processing(lowerCAmelCase , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 107 | 1 |
'''simple docstring'''
from math import pi, sqrt
def __UpperCamelCase ( num : float ) -> float:
    """Return Gamma(num) for positive integers and half-integers.

    Uses the recurrence Gamma(n) = (n - 1) * Gamma(n - 1) with the base cases
    Gamma(1) = 1 and Gamma(0.5) = sqrt(pi).

    Raises:
        ValueError: if ``num`` <= 0 (domain error).
        OverflowError: if ``num`` > 171.5 (the result overflows a float).
        NotImplementedError: if ``num`` is neither an integer nor a half-integer.
    """
    if num <= 0:
        raise ValueError('''math domain error''' )
    if num > 171.5:
        raise OverflowError('''math range error''' )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError('''num must be an integer or a half-integer''' )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )


gamma = __UpperCamelCase  # restore the public name used by the self-test and __main__ below
def __UpperCamelCase ( ) -> None:
    """Sanity-check gamma at a few known points (Gamma(0.5) = sqrt(pi))."""
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowercase = 1.0
while num:
_lowercase = float(input('Gamma of: '))
print(f'gamma({num}) = {gamma(num)}')
print('\nEnter 0 to exit...')
| 342 |
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_lowercase = logging.get_logger(__name__)
class _lowercase :
    """Facade over a question-encoder tokenizer and a generator tokenizer.

    Calls are forwarded to ``current_tokenizer`` (the question encoder by
    default).  The generated code reused the single parameter name ``A__``
    everywhere (a SyntaxError) and discarded intermediate results; names are
    recovered from the body's own references (``save_directory`` appears in
    the error message, attributes via ``self.question_encoder`` /
    ``self.generator``).
    """

    def __init__( self , question_encoder , generator ) -> Tuple:
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def UpperCamelCase ( self , save_directory ) -> int:
        """Save both sub-tokenizers under dedicated sub-folders of ``save_directory``."""
        if os.path.isfile(save_directory ):
            raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
        os.makedirs(save_directory , exist_ok=True )
        question_encoder_path = os.path.join(save_directory , '''question_encoder_tokenizer''' )
        generator_path = os.path.join(save_directory , '''generator_tokenizer''' )
        self.question_encoder.save_pretrained(question_encoder_path )
        self.generator.save_pretrained(generator_path )

    @classmethod
    def UpperCamelCase ( cls , pretrained_model_name_or_path , **kwargs ) -> List[Any]:
        """Load both sub-tokenizers from their sub-folders and wrap them."""
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop('''config''' , None )
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path )
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.generator , subfolder='''generator_tokenizer''' )
        return cls(question_encoder=question_encoder , generator=generator )

    def __call__( self , *args , **kwargs ) -> Any:
        return self.current_tokenizer(*args , **kwargs )

    def UpperCamelCase ( self , *args , **kwargs ) -> Tuple:
        return self.generator.batch_decode(*args , **kwargs )

    def UpperCamelCase ( self , *args , **kwargs ) -> Tuple:
        return self.generator.decode(*args , **kwargs )

    def UpperCamelCase ( self ) -> Optional[Any]:
        # Switch forwarding to the question-encoder tokenizer.
        self.current_tokenizer = self.question_encoder

    def UpperCamelCase ( self ) -> str:
        # Switch forwarding to the generator tokenizer.
        self.current_tokenizer = self.generator

    def UpperCamelCase ( self , src_texts , tgt_texts = None , max_length = None , max_target_length = None , padding = "longest" , return_tensors = None , truncation = True , **kwargs , ) -> "BatchEncoding":
        """Deprecated seq2seq batch helper; tokenizes sources and optional targets."""
        warnings.warn(
            '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
            '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
            '''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
            '''details''' , FutureWarning , )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=truncation , **kwargs , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts , add_special_tokens=True , return_tensors=return_tensors , padding=padding , max_length=max_target_length , truncation=truncation , **kwargs , )
        model_inputs['''labels'''] = labels['''input_ids''']
        return model_inputs
| 342 | 1 |
def __lowercase( density : float , bulk_modulus : float ) -> float:
    """Return the speed of sound in a fluid, c = sqrt(K / rho).

    ``bulk_modulus`` (K) and ``density`` (rho) must be strictly positive;
    units are the caller's responsibility (SI gives m/s).

    Raises:
        ValueError: if ``density`` or ``bulk_modulus`` is not positive.
    """
    if density <= 0:
        raise ValueError('Impossible fluid density' )
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus' )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCamelCase (unittest.TestCase ):
    """Holds the hyper-parameters used by the image-processor tests below.

    The generated signature reused the single name ``SCREAMING_SNAKE_CASE_``
    for every parameter (a SyntaxError) and bound the attributes to throwaway
    locals; names are recovered from the right-hand sides of the original
    assignments and from the dict-building method.
    """

    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
        # Default target size when the caller does not supply one.
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def __lowerCamelCase ( self ):
        # Kwargs for constructing the image processor under test.
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCamelCase (lowerCamelCase , unittest.TestCase ):
lowercase__ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __lowerCamelCase ( self ):
__snake_case = LayoutLMvaImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
__snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'do_resize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'size' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'apply_ocr' ) )
def __lowerCamelCase ( self ):
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
# Initialize image_processing
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(encoding.boxes , SCREAMING_SNAKE_CASE_ )
# Test batched
__snake_case = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__snake_case = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__snake_case = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __lowerCamelCase ( self ):
    """Integration test for ``LayoutLMvaImageProcessor``: with OCR enabled the
    processor must return the exact words/boxes Tesseract 4.1.1 produced for
    the DocVQA fixture page; with OCR disabled it must return pixel values
    only.

    NOTE(review): the argument names ``SCREAMING_SNAKE_CASE_`` are never bound
    here — every local is assigned to the mangled ``__snake_case`` — so the
    names read below (``ds``, ``encoding``) are undefined as written; confirm
    against the upstream test.
    """
    # with apply_OCR = True (the default)
    __snake_case = LayoutLMvaImageProcessor()
    from datasets import load_dataset
    __snake_case = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
    __snake_case = Image.open(ds[0]['file'] ).convert('RGB' )
    __snake_case = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
    # Images are resized to the processor's default 224x224.
    self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
    # One bounding box per recognized word.
    self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
    # fmt: off
    # the words and boxes were obtained with Tesseract 4.1.1
    __snake_case = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
    __snake_case = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 
637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
    # fmt: on
    self.assertListEqual(encoding.words , SCREAMING_SNAKE_CASE_ )
    self.assertListEqual(encoding.boxes , SCREAMING_SNAKE_CASE_ )
    # with apply_OCR = False: only pixel values are produced, no words/boxes.
    __snake_case = LayoutLMvaImageProcessor(apply_ocr=SCREAMING_SNAKE_CASE_ )
    __snake_case = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
    self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 345 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase_ ( lowercase , unittest.TestCase ):
    """Fast (CPU, dummy-sized) tests for ``KandinskyInpaintPipeline``.

    NOTE(review): this class is heavily name-mangled — every class attribute
    is named ``_snake_case`` (each assignment overwrites the previous one) and
    every method is named ``__a`` (later defs shadow earlier ones), and
    several method bodies read names (``lowerCamelCase__``, ``seed``,
    ``generator`` …) that are never bound here. It cannot behave as intended
    as written; the comments below describe the evident upstream intent.
    """

    # Pipeline class under test (upstream: ``pipeline_class``).
    _snake_case : List[str] = KandinskyInpaintPipeline
    # Required call arguments (upstream: ``params``).
    _snake_case : int = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
    # Batched call arguments (upstream: ``batch_params``).
    _snake_case : Any = [
        """prompt""",
        """negative_prompt""",
        """image_embeds""",
        """negative_image_embeds""",
        """image""",
        """mask_image""",
    ]
    # Optional keyword arguments accepted by ``__call__``.
    _snake_case : List[Any] = [
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """negative_prompt""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    _snake_case : Tuple = False

    @property
    def __a ( self :Optional[int] ):
        # Text-embedder hidden size for the tiny dummy model.
        return 32

    @property
    def __a ( self :Any ):
        # Time-embedding input dimension.
        return 32

    @property
    def __a ( self :str ):
        # Cross-attention dim mirrors the time input dim.
        return self.time_input_dim

    @property
    def __a ( self :List[Any] ):
        # Time-embedding output dimension.
        return self.time_input_dim * 4

    @property
    def __a ( self :Dict ):
        # Sequence length used by the dummy text encoder.
        return 1_00

    @property
    def __a ( self :List[str] ):
        # Tiny multilingual CLIP tokenizer fixture from the hub.
        UpperCamelCase__ :Any = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
        return tokenizer

    @property
    def __a ( self :Dict ):
        # Dummy MultilingualCLIP text encoder (seeded for determinism).
        torch.manual_seed(0 )
        UpperCamelCase__ :Union[str, Any] = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
        UpperCamelCase__ :Optional[Any] = MultilingualCLIP(lowerCamelCase__ )
        UpperCamelCase__ :Optional[Any] = text_encoder.eval()
        return text_encoder

    @property
    def __a ( self :Dict ):
        # Dummy UNet: 9 input channels (4 latent + 4 masked-image latent +
        # 1 mask) — the inpainting-specific layout.
        torch.manual_seed(0 )
        UpperCamelCase__ :Optional[Any] = {
            """in_channels""": 9,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """text_image""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """text_image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        UpperCamelCase__ :Optional[Any] = UNetaDConditionModel(**lowerCamelCase__ )
        return model

    @property
    def __a ( self :List[Any] ):
        # Constructor kwargs for the tiny MoVQ (VQModel) decoder.
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def __a ( self :str ):
        # Dummy MoVQ decoder (seeded for determinism).
        torch.manual_seed(0 )
        UpperCamelCase__ :Tuple = VQModel(**self.dummy_movq_kwargs )
        return model

    def __a ( self :List[str] ):
        # Assemble all dummy components into the dict the pipeline expects.
        UpperCamelCase__ :Optional[Any] = self.dummy_text_encoder
        UpperCamelCase__ :str = self.dummy_tokenizer
        UpperCamelCase__ :List[str] = self.dummy_unet
        UpperCamelCase__ :Any = self.dummy_movq
        UpperCamelCase__ :Optional[int] = DDIMScheduler(
            num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=lowerCamelCase__ , )
        UpperCamelCase__ :Optional[int] = {
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components

    # NOTE(review): duplicate parameter name below (``lowerCamelCase__``
    # twice) is a SyntaxError as written — a mangling artifact; upstream this
    # is ``get_dummy_inputs(self, device, seed=0)``.
    def __a ( self :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :int=0 ):
        # Seeded random image/negative embeddings for the pipeline call.
        UpperCamelCase__ :Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
        UpperCamelCase__ :int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowerCamelCase__ )
        # create init_image
        UpperCamelCase__ :Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
        UpperCamelCase__ :str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCamelCase__ :Any = Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert("""RGB""" ).resize((2_56, 2_56) )
        # create mask
        UpperCamelCase__ :Any = np.ones((64, 64) , dtype=np.floataa )
        # NOTE(review): upstream this is a slice assignment zeroing part of
        # the mask; mangling reduced it to a plain rebind — confirm upstream.
        UpperCamelCase__ :Optional[int] = 0
        if str(lowerCamelCase__ ).startswith("""mps""" ):
            # MPS devices only support the global (CPU) generator.
            UpperCamelCase__ :List[str] = torch.manual_seed(lowerCamelCase__ )
        else:
            UpperCamelCase__ :str = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
        UpperCamelCase__ :List[str] = {
            """prompt""": """horse""",
            """image""": init_image,
            """mask_image""": mask,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """num_inference_steps""": 2,
            """guidance_scale""": 4.0,
            """output_type""": """np""",
        }
        return inputs

    def __a ( self :str ):
        # End-to-end dummy inference on CPU: output shape and a fixed pixel
        # slice must match the recorded reference values.
        UpperCamelCase__ :Dict = """cpu"""
        UpperCamelCase__ :Any = self.get_dummy_components()
        UpperCamelCase__ :List[Any] = self.pipeline_class(**lowerCamelCase__ )
        UpperCamelCase__ :Union[str, Any] = pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        UpperCamelCase__ :List[Any] = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
        UpperCamelCase__ :List[str] = output.images
        # Also check the tuple (return_dict=False) code path.
        UpperCamelCase__ :str = pipe(
            **self.get_dummy_inputs(lowerCamelCase__ ) , return_dict=lowerCamelCase__ , )[0]
        UpperCamelCase__ :str = image[0, -3:, -3:, -1]
        UpperCamelCase__ :Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
        print(f"""image.shape {image.shape}""" )
        assert image.shape == (1, 64, 64, 3)
        UpperCamelCase__ :Optional[int] = np.array(
            [0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""

    def __a ( self :Optional[Any] ):
        # Batched vs. single inference must agree within a small tolerance.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
    """Slow GPU integration test for the Kandinsky 2.1 inpainting pipeline.

    Runs the real prior + inpaint checkpoints end to end ("a hat" on the
    fixture cat image) and compares the result against a reference image.
    Fixes applied to the mangled original:
    - removed dataset-dump junk (``| 45 |``) fused onto the final line, which
      made the method a syntax error;
    - renamed the cleanup method back to ``tearDown`` (both methods were
      mangled to ``__a``, so the second shadowed the first and the VRAM
      cleanup never ran);
    - restored readable local names and the undefined mangled reads
      (``torch.floataa`` -> ``torch.float16``, ``np.floataa`` ->
      ``np.float32``, ``lowerCamelCase__`` -> the actual locals).
    """

    def tearDown( self ):
        # Clean up the VRAM after each test.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __a ( self :Union[str, Any] ):
        """End-to-end inpainting against a hub-hosted reference image."""
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        # Mask out the band the model should repaint (upstream slice —
        # confirm against the reference test).
        mask = np.ones((7_68, 7_68) , dtype=np.float32 )
        mask[:2_50, 2_50:-2_50] = 0
        prompt = """a hat"""
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        # The prior turns the prompt into (positive, negative) image embeds.
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image , expected_image )
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A ( lowercase__ : Optional[int] ) -> Optional[Any]:
    """Tokenize one dataset example for ``Dataset.map``.

    Parameters
    ----------
    lowercase__ : dict
        A dataset row with a ``"content"`` text field.

    Returns
    -------
    dict
        ``input_ids`` — the tokenized content — and ``ratio_char_token``,
        the characters-per-token ratio (useful for filtering degenerate
        samples).

    Relies on the module-level ``tokenizer`` created by the driver script
    below. NOTE(review): the mangled original read undefined names
    (``example``; ``output`` was never bound); reconstructed to use the
    actual parameter and ``truncation=False`` — confirm against upstream.
    """
    output = {}
    output["input_ids"] = tokenizer(lowercase__["content"] , truncation=False )["input_ids"]
    output["ratio_char_token"] = len(lowercase__["content"] ) / len(output["input_ids"] )
    return output
# ---------------------------------------------------------------------------
# Pre-tokenization driver: parse CLI args, tokenize the dataset in parallel
# with the map function ``A`` above, and push the result to the Hub.
# NOTE(review): the mangled original assigned every value to the single name
# ``UpperCamelCase`` and then read undefined names (``parser``, ``args``,
# ``t_start``, ``ds``, ``tokenizer``), and the final line carried fused
# dataset-dump junk (``| 45 | 1 |``) that broke the syntax; both fixed.
# ---------------------------------------------------------------------------
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    # Default to one worker per CPU core.
    args.num_workers = multiprocessing.cpu_count()
# Module-level tokenizer: read by the map function ``A`` in each worker.
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
# Drop the raw-text columns so only the tokenized fields are kept.
ds = ds.map(
    A,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
# NOTE(review): every constant below is mangled to the single name
# ``__UpperCAmelCase`` — each assignment overwrites the previous one, while
# the tokenizer class later reads ``VOCAB_FILES_NAMES`` etc., which are never
# defined here. Confirm the intended constant names against upstream.
__UpperCAmelCase = logging.get_logger(__name__)

# Names of the serialized tokenizer asset files (upstream: VOCAB_FILES_NAMES).
__UpperCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

# See all MVP models at https://huggingface.co/models?filter=mvp
# Hub URLs for each asset file (upstream: PRETRAINED_VOCAB_FILES_MAP).
__UpperCAmelCase = {
    'vocab_file': {
        'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
    },
    'added_tokens.json': {
        'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
    },
    'merges_file': {
        'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
    },
}

# Max positional-embedding size per checkpoint
# (upstream: PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES).
__UpperCAmelCase = {
    'RUCAIBox/mvp': 10_24,
}
class _SCREAMING_SNAKE_CASE ( A__ ):
    """Fast (Rust-backed) MVP tokenizer, a BPE tokenizer derived from
    GPT-2/BART style vocab + merges files.

    NOTE(review): heavy mangling — every method is named ``__lowerCAmelCase``
    (later defs shadow earlier ones), and ``@mask_token.setter`` references a
    ``mask_token`` property that is never defined under that name, which
    would raise ``NameError`` at class creation. Left byte-identical;
    comments describe the evident upstream intent.
    """

    UpperCAmelCase_ :Any = VOCAB_FILES_NAMES
    UpperCAmelCase_ :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase_ :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase_ :List[Any] = ["input_ids", "attention_mask"]
    # Slow-tokenizer counterpart used for conversion.
    UpperCAmelCase_ :Optional[Any] = MvpTokenizer

    def __init__( self , __A=None , __A=None , __A=None , __A="replace" , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A=False , __A=True , **__A , ) -> Optional[int]:
        """Build the fast tokenizer, then re-sync the backend pre-tokenizer
        and post-processor with the ``add_prefix_space``/``trim_offsets``
        options requested by the caller."""
        super().__init__(
            __A , __A , tokenizer_file=__A , errors=__A , bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , unk_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , trim_offsets=__A , **__A , )
        # Rebuild the pre-tokenizer if the serialized add_prefix_space
        # disagrees with the requested one.
        lowerCAmelCase_ :List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , __A ) != add_prefix_space:
            lowerCAmelCase_ :List[Any] = getattr(__A , pre_tok_state.pop("""type""" ) )
            lowerCAmelCase_ :List[Any] = add_prefix_space
            lowerCAmelCase_ :Any = pre_tok_class(**__A )
        lowerCAmelCase_ :int = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        lowerCAmelCase_ :List[Any] = """post_processor"""
        lowerCAmelCase_ :Optional[int] = getattr(self.backend_tokenizer , __A , __A )
        if tokenizer_component_instance:
            lowerCAmelCase_ :Any = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                lowerCAmelCase_ :int = tuple(state["""sep"""] )
            if "cls" in state:
                lowerCAmelCase_ :Any = tuple(state["""cls"""] )
            lowerCAmelCase_ :Union[str, Any] = False
            if state.get("""add_prefix_space""" , __A ) != add_prefix_space:
                lowerCAmelCase_ :Tuple = add_prefix_space
                lowerCAmelCase_ :List[str] = True
            if state.get("""trim_offsets""" , __A ) != trim_offsets:
                lowerCAmelCase_ :Dict = trim_offsets
                lowerCAmelCase_ :List[Any] = True
            # Only rebuild the post-processor when something actually changed.
            if changes_to_apply:
                lowerCAmelCase_ :Optional[Any] = getattr(__A , state.pop("""type""" ) )
                lowerCAmelCase_ :List[Any] = component_class(**__A )
                setattr(self.backend_tokenizer , __A , __A )

    @property
    def __lowerCAmelCase ( self ) -> str:
        # mask_token getter: warn (don't raise) when unset so serialization
        # paths keep working.
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""" )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def __lowerCAmelCase ( self , __A ) -> Any:
        # mask_token setter: the mask token eats preceding whitespace
        # (lstrip) so ``<mask>`` behaves like a word in masked-LM inputs.
        lowerCAmelCase_ :List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else value
        lowerCAmelCase_ :List[Any] = value

    def __lowerCAmelCase ( self , *__A , **__A ) -> BatchEncoding:
        # Pretokenized input requires add_prefix_space=True so word
        # boundaries are encoded consistently.
        lowerCAmelCase_ :Dict = kwargs.get("""is_split_into_words""" , __A )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                """to use it with pretokenized inputs.""" )
        return super()._batch_encode_plus(*__A , **__A )

    def __lowerCAmelCase ( self , *__A , **__A ) -> BatchEncoding:
        # Same guard as above for the single-sequence encode path.
        lowerCAmelCase_ :List[str] = kwargs.get("""is_split_into_words""" , __A )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                """to use it with pretokenized inputs.""" )
        return super()._encode_plus(*__A , **__A )

    def __lowerCAmelCase ( self , __A , __A = None ) -> Tuple[str]:
        # Save the backend model's vocab/merges files to a directory.
        lowerCAmelCase_ :Optional[Any] = self._tokenizer.model.save(__A , name=__A )
        return tuple(__A )

    def __lowerCAmelCase ( self , __A , __A=None ) -> Dict:
        # BART-style special tokens: <s> A </s> [</s> B </s>]
        lowerCAmelCase_ :str = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def __lowerCAmelCase ( self , __A , __A = None ) -> List[int]:
        # MVP (like BART/RoBERTa) does not use token type ids: all zeros.
        lowerCAmelCase_ :Tuple = [self.sep_token_id]
        lowerCAmelCase_ :Tuple = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 256 |
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class _SCREAMING_SNAKE_CASE :
    """Model tester for DPT (hybrid): builds tiny configs/inputs and asserts
    output shapes for the base model and both task heads.

    NOTE(review): locals are mangled to ``lowerCAmelCase_`` while later lines
    read the upstream names (``config``, ``model``, ``result`` …), and line
    ``lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = ...`` annotates a tuple
    target, which is invalid Python — mangling artifacts, left byte-identical.
    """

    def __init__( self , __A , __A=2 , __A=32 , __A=16 , __A=3 , __A=True , __A=True , __A=32 , __A=4 , __A=[0, 1, 2, 3] , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=0.0_2 , __A=3 , __A=[1, 384, 24, 24] , __A=True , __A=None , ) -> Optional[Any]:
        # Store the tiny-model hyperparameters used by every test below.
        lowerCAmelCase_ :Optional[Any] = parent
        lowerCAmelCase_ :Optional[int] = batch_size
        lowerCAmelCase_ :Dict = image_size
        lowerCAmelCase_ :Tuple = patch_size
        lowerCAmelCase_ :Union[str, Any] = num_channels
        lowerCAmelCase_ :Tuple = is_training
        lowerCAmelCase_ :Dict = use_labels
        lowerCAmelCase_ :Union[str, Any] = hidden_size
        lowerCAmelCase_ :Union[str, Any] = num_hidden_layers
        lowerCAmelCase_ :List[Any] = backbone_out_indices
        lowerCAmelCase_ :Optional[int] = num_attention_heads
        lowerCAmelCase_ :List[str] = intermediate_size
        lowerCAmelCase_ :Any = hidden_act
        lowerCAmelCase_ :str = hidden_dropout_prob
        lowerCAmelCase_ :Tuple = attention_probs_dropout_prob
        lowerCAmelCase_ :List[Any] = initializer_range
        lowerCAmelCase_ :Optional[int] = num_labels
        lowerCAmelCase_ :List[Any] = backbone_featmap_shape
        lowerCAmelCase_ :List[Any] = scope
        lowerCAmelCase_ :Tuple = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        lowerCAmelCase_ :Optional[int] = (image_size // patch_size) ** 2
        lowerCAmelCase_ :Optional[Any] = num_patches + 1

    def __lowerCAmelCase ( self ) -> Optional[Any]:
        # Random pixel values (+ optional segmentation labels) and a config.
        lowerCAmelCase_ :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCAmelCase_ :List[str] = None
        if self.use_labels:
            lowerCAmelCase_ :Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        lowerCAmelCase_ :List[str] = self.get_config()
        return config, pixel_values, labels

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        # Tiny BiT backbone config for the hybrid DPT variant.
        lowerCAmelCase_ :Dict = {
            """global_padding""": """same""",
            """layer_type""": """bottleneck""",
            """depths""": [3, 4, 9],
            """out_features""": ["""stage1""", """stage2""", """stage3"""],
            """embedding_dynamic_padding""": True,
            """hidden_sizes""": [96, 192, 384, 768],
            """num_groups""": 2,
        }
        return DPTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__A , backbone_featmap_shape=self.backbone_featmap_shape , )

    def __lowerCAmelCase ( self , __A , __A , __A ) -> List[Any]:
        # Base model: hidden states must be (batch, seq_len, hidden).
        lowerCAmelCase_ :Dict = DPTModel(config=__A )
        model.to(__A )
        model.eval()
        lowerCAmelCase_ :List[str] = model(__A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __lowerCAmelCase ( self , __A , __A , __A ) -> List[str]:
        # Depth head: one depth map per image at full resolution.
        lowerCAmelCase_ :Union[str, Any] = self.num_labels
        lowerCAmelCase_ :Optional[Any] = DPTForDepthEstimation(__A )
        model.to(__A )
        model.eval()
        lowerCAmelCase_ :Dict = model(__A )
        self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )

    def __lowerCAmelCase ( self , __A , __A , __A ) -> List[Any]:
        # Segmentation head: per-class logits at full resolution.
        lowerCAmelCase_ :int = self.num_labels
        lowerCAmelCase_ :Optional[int] = DPTForSemanticSegmentation(__A )
        model.to(__A )
        model.eval()
        lowerCAmelCase_ :Union[str, Any] = model(__A , labels=__A )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )

    def __lowerCAmelCase ( self ) -> Tuple:
        # Repackage (config, pixel_values, labels) as the common-test dict.
        lowerCAmelCase_ :Optional[Any] = self.prepare_config_and_inputs()
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = config_and_inputs
        lowerCAmelCase_ :str = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
    """Common-suite tests for DPT (base model, depth-estimation head and
    semantic-segmentation head).

    NOTE(review): mangling artifacts throughout — locals assigned to
    ``lowerCAmelCase_`` while subsequent lines read the upstream names, and
    several ``lowerCAmelCase_ , lowerCAmelCase_ :T = ...`` lines annotate a
    tuple target, which is invalid Python. Left byte-identical.
    """

    UpperCAmelCase_ :str = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    UpperCAmelCase_ :Union[str, Any] = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    UpperCAmelCase_ :Optional[Any] = False
    UpperCAmelCase_ :List[str] = False
    UpperCAmelCase_ :Dict = False

    def __lowerCAmelCase ( self ) -> List[Any]:
        # Per-test model tester + config tester (ViT-style configs have no
        # text modality).
        lowerCAmelCase_ :List[str] = DPTModelTester(self )
        lowerCAmelCase_ :List[str] = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=37 )

    def __lowerCAmelCase ( self ) -> Optional[int]:
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""DPT does not use inputs_embeds""" )
    def __lowerCAmelCase ( self ) -> List[str]:
        pass

    def __lowerCAmelCase ( self ) -> Dict:
        # Input embeddings are a module; DPT has no output embeddings head.
        lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ :Dict = model_class(__A )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowerCAmelCase_ :Union[str, Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__A , nn.Linear ) )

    def __lowerCAmelCase ( self ) -> Dict:
        # forward() must take pixel_values as its first argument.
        lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ :Union[str, Any] = model_class(__A )
            lowerCAmelCase_ :int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase_ :int = [*signature.parameters.keys()]
            lowerCAmelCase_ :int = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , __A )

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        lowerCAmelCase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__A )

    def __lowerCAmelCase ( self ) -> str:
        lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*__A )

    def __lowerCAmelCase ( self ) -> Any:
        lowerCAmelCase_ :List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*__A )

    def __lowerCAmelCase ( self ) -> List[str]:
        # Training smoke test: a labelled forward + backward must run.
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            lowerCAmelCase_ , lowerCAmelCase_ :str = self.model_tester.prepare_config_and_inputs_for_common()
            lowerCAmelCase_ :Any = True
            if model_class in get_values(__A ):
                continue
            lowerCAmelCase_ :Union[str, Any] = model_class(__A )
            model.to(__A )
            model.train()
            lowerCAmelCase_ :Optional[int] = self._prepare_for_class(__A , __A , return_labels=__A )
            lowerCAmelCase_ :Dict = model(**__A ).loss
            loss.backward()

    def __lowerCAmelCase ( self ) -> Any:
        # Same training smoke test, with gradient checkpointing enabled.
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            lowerCAmelCase_ , lowerCAmelCase_ :str = self.model_tester.prepare_config_and_inputs_for_common()
            lowerCAmelCase_ :Dict = False
            lowerCAmelCase_ :Optional[Any] = True
            if model_class in get_values(__A ) or not model_class.supports_gradient_checkpointing:
                continue
            lowerCAmelCase_ :Optional[Any] = model_class(__A )
            model.to(__A )
            model.gradient_checkpointing_enable()
            model.train()
            lowerCAmelCase_ :Any = self._prepare_for_class(__A , __A , return_labels=__A )
            lowerCAmelCase_ :Dict = model(**__A ).loss
            loss.backward()

    def __lowerCAmelCase ( self ) -> Tuple:
        # With zero-init config, every trainable (non-backbone) parameter
        # mean must be exactly 0.0 or 1.0.
        lowerCAmelCase_ , lowerCAmelCase_ :Any = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase_ :int = _config_zero_init(__A )
        for model_class in self.all_model_classes:
            lowerCAmelCase_ :str = model_class(config=__A )
            # Skip the check for the backbone
            lowerCAmelCase_ :List[Any] = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    lowerCAmelCase_ :Tuple = [f"""{name}.{key}""" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def __lowerCAmelCase ( self ) -> List[str]:
        pass

    @slow
    def __lowerCAmelCase ( self ) -> List[str]:
        # All published checkpoints (except the first) must load.
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            lowerCAmelCase_ :List[Any] = DPTModel.from_pretrained(__A )
            self.assertIsNotNone(__A )

    def __lowerCAmelCase ( self ) -> int:
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase_ :List[Any] = """add"""
        with self.assertRaises(__A ):
            lowerCAmelCase_ :Optional[Any] = DPTForDepthEstimation(__A )
def _snake_case ( ) -> int:
    """Load the standard COCO cats test-fixture image.

    Fix: the mangled original assigned the image to ``lowerCAmelCase_`` but
    returned the undefined name ``image``; the local is now bound correctly.
    NOTE(review): the ``-> int`` annotation is inherited and wrong — the
    function actually returns a ``PIL.Image.Image``; confirm upstream before
    changing the annotation.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration test: run the real ``Intel/dpt-hybrid-midas``
    checkpoint on the fixture image and pin a 3x3 slice of the depth map.

    NOTE(review): mangling artifacts — locals assigned to ``lowerCAmelCase_``
    while later lines read the upstream names (``image_processor``, ``model``,
    ``outputs`` …), and ``__A`` is read where upstream passed the actual
    locals. Left byte-identical.
    """

    def __lowerCAmelCase ( self ) -> int:
        lowerCAmelCase_ :List[str] = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
        lowerCAmelCase_ :str = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__A )
        lowerCAmelCase_ :Any = prepare_img()
        lowerCAmelCase_ :str = image_processor(images=__A , return_tensors="""pt""" ).to(__A )
        # forward pass
        with torch.no_grad():
            lowerCAmelCase_ :Union[str, Any] = model(**__A )
        lowerCAmelCase_ :str = outputs.predicted_depth
        # verify the predicted depth
        lowerCAmelCase_ :Tuple = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape , __A )
        lowerCAmelCase_ :int = torch.tensor(
            [[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__A )
        # Depth values are divided by 100 to match the recorded reference.
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __A , atol=1E-4 ) )
| 256 | 1 |
import numpy as np
def _A ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple ):
return np.where(vector > 0 , SCREAMING_SNAKE_CASE_ , (alpha * (np.exp(SCREAMING_SNAKE_CASE_ ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): # picklable for multiprocessing
return x.sum()
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): # picklable for multiprocessing
return i + 1
@dataclass
class _snake_case :
    # NOTE(review): obfuscation renamed both fields (originally `x: int`,
    # `y: str` for a class `A`) to the same name, so the second annotation
    # overwrites the first and the dataclass ends up with a single field.
    # Later tests instantiate `A(x=..., y=...)`, which no longer exists.
    UpperCamelCase__ : int
    UpperCamelCase__ : str
class _snake_case ( lowercase__):
    """Tests for `datasets.utils.py_utils` helpers (map_nested, zip_dict, temporary_assignment).

    NOTE(review): obfuscation collapsed every local into `lowercase__` and the
    class-internal references `__lowercase` are name-mangled to
    `_snake_case__lowercase` (undefined) — these bodies cannot run as written;
    the original distinct locals must be restored.
    """
    def A__ ( self : str ):
        """map_nested should map a function over nested dicts/lists, serially and with num_proc."""
        lowercase__ = {}
        lowercase__ = []
        lowercase__ = 1
        lowercase__ = [1, 2]
        lowercase__ = {"a": 1, "b": 2}
        lowercase__ = {"a": [1, 2], "b": [3, 4]}
        lowercase__ = {"a": {"1": 1}, "b": 2}
        lowercase__ = {"a": 1, "b": 2, "c": 3, "d": 4}
        lowercase__ = {}
        lowercase__ = []
        lowercase__ = 2
        lowercase__ = [2, 3]
        lowercase__ = {"a": 2, "b": 3}
        lowercase__ = {"a": [2, 3], "b": [4, 5]}
        lowercase__ = {"a": {"1": 2}, "b": 3}
        lowercase__ = {"a": 2, "b": 3, "c": 4, "d": 5}
        # Serial mapping over each input structure.
        self.assertEqual(map_nested(__lowercase, __lowercase ), __lowercase )
        self.assertEqual(map_nested(__lowercase, __lowercase ), __lowercase )
        self.assertEqual(map_nested(__lowercase, __lowercase ), __lowercase )
        self.assertEqual(map_nested(__lowercase, __lowercase ), __lowercase )
        self.assertEqual(map_nested(__lowercase, __lowercase ), __lowercase )
        self.assertEqual(map_nested(__lowercase, __lowercase ), __lowercase )
        self.assertEqual(map_nested(__lowercase, __lowercase ), __lowercase )
        self.assertEqual(map_nested(__lowercase, __lowercase ), __lowercase )
        lowercase__ = 2
        # Same expectations under multiprocessing.
        self.assertEqual(map_nested(__lowercase, __lowercase, num_proc=__lowercase ), __lowercase )
        self.assertEqual(map_nested(__lowercase, __lowercase, num_proc=__lowercase ), __lowercase )
        self.assertEqual(map_nested(__lowercase, __lowercase, num_proc=__lowercase ), __lowercase )
        self.assertEqual(map_nested(__lowercase, __lowercase, num_proc=__lowercase ), __lowercase )
        self.assertEqual(map_nested(__lowercase, __lowercase, num_proc=__lowercase ), __lowercase )
        self.assertEqual(map_nested(__lowercase, __lowercase, num_proc=__lowercase ), __lowercase )
        self.assertEqual(map_nested(__lowercase, __lowercase, num_proc=__lowercase ), __lowercase )
        self.assertEqual(map_nested(__lowercase, __lowercase, num_proc=__lowercase ), __lowercase )
        # numpy values are mapped only when map_numpy=True.
        lowercase__ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
        lowercase__ = {"a": 2, "b": 0, "c": 2}
        lowercase__ = {
            "a": np.eye(2 ).astype(__lowercase ),
            "b": np.zeros(3 ).astype(__lowercase ),
            "c": np.ones(2 ).astype(__lowercase ),
        }
        self.assertEqual(map_nested(__lowercase, __lowercase, map_numpy=__lowercase ), __lowercase )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(__lowercase, __lowercase, map_numpy=__lowercase ).items()}, {k: v.tolist() for k, v in expected_map_nested_sna_int.items()}, )
        self.assertEqual(map_nested(__lowercase, __lowercase, map_numpy=__lowercase, num_proc=__lowercase ), __lowercase )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(__lowercase, __lowercase, map_numpy=__lowercase, num_proc=__lowercase ).items()}, {k: v.tolist() for k, v in expected_map_nested_sna_int.items()}, )
        with self.assertRaises(__lowercase ):  # can't pickle a local lambda
            map_nested(lambda __lowercase : x + 1, __lowercase, num_proc=__lowercase )
    def A__ ( self : int ):
        """zip_dict should yield (key, tuple-of-values) across several dicts."""
        lowercase__ = {"a": 1, "b": 2}
        lowercase__ = {"a": 3, "b": 4}
        lowercase__ = {"a": 5, "b": 6}
        lowercase__ = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(__lowercase, __lowercase, __lowercase ) ), __lowercase )
    def A__ ( self : Optional[int] ):
        """temporary_assignment should restore the attribute on context exit."""
        class _snake_case :
            # NOTE(review): originally a class `Foo` with attribute `my_attr = "bar"`.
            UpperCamelCase__ : Optional[int] ="""bar"""
        lowercase__ = Foo()
        self.assertEqual(foo.my_attr, "bar" )
        with temporary_assignment(__lowercase, "my_attr", "BAR" ):
            self.assertEqual(foo.my_attr, "BAR" )
        self.assertEqual(foo.my_attr, "bar" )
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc" , [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ] , )
def __lowerCAmelCase ( iterable_length , num_proc , expected_num_proc ):
    """map_nested should go parallel only when both num_proc > 1 and the input
    reaches parallel_min_length (16); otherwise it maps serially.

    Fix: the original declared three parameters with the same obfuscated name
    (a SyntaxError) — they must also match the parametrize argnames for pytest
    to inject them — and the lambda referenced the undefined name ``x``.
    """
    with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        data_struct = {f'''{i}''': i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 10 , data_struct , num_proc=num_proc , parallel_min_length=16 )
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            # The pool must be created with the effective number of processes.
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class _snake_case ( lowercase__):
    """Tests for `temp_seed`: identical seeds reproduce outputs; a fresh call differs.

    NOTE(review): obfuscation collapsed distinct locals (`model`, `outa`/`outb`,
    etc.) into `lowercase__`, and `outa - outa` is always zero — these bodies
    cannot pass as written until the original bindings are restored.
    """
    @require_tf
    def A__ ( self : Union[str, Any] ):
        """temp_seed(set_tensorflow=True) should make TF random output reproducible."""
        import tensorflow as tf
        from tensorflow.keras import layers
        lowercase__ = layers.Dense(2 )
        def gen_random_output():
            lowercase__ = tf.random.uniform((1, 3) )
            return model(__lowercase ).numpy()
        with temp_seed(42, set_tensorflow=__lowercase ):
            lowercase__ = gen_random_output()
        with temp_seed(42, set_tensorflow=__lowercase ):
            lowercase__ = gen_random_output()
        lowercase__ = gen_random_output()
        np.testing.assert_equal(__lowercase, __lowercase )
        self.assertGreater(np.abs(outa - outa ).sum(), 0 )
    @require_torch
    def A__ ( self : int ):
        """temp_seed(set_pytorch=True) should make torch random output reproducible."""
        import torch
        def gen_random_output():
            lowercase__ = torch.nn.Linear(3, 2 )
            lowercase__ = torch.rand(1, 3 )
            return model(__lowercase ).detach().numpy()
        with temp_seed(42, set_pytorch=__lowercase ):
            lowercase__ = gen_random_output()
        with temp_seed(42, set_pytorch=__lowercase ):
            lowercase__ = gen_random_output()
        lowercase__ = gen_random_output()
        np.testing.assert_equal(__lowercase, __lowercase )
        self.assertGreater(np.abs(outa - outa ).sum(), 0 )
    def A__ ( self : Optional[int] ):
        """temp_seed with no flags should still seed NumPy."""
        def gen_random_output():
            return np.random.rand(1, 3 )
        with temp_seed(42 ):
            lowercase__ = gen_random_output()
        with temp_seed(42 ):
            lowercase__ = gen_random_output()
        lowercase__ = gen_random_output()
        np.testing.assert_equal(__lowercase, __lowercase )
        self.assertGreater(np.abs(outa - outa ).sum(), 0 )
@pytest.mark.parametrize("input_data" , [{}] )
def __lowerCAmelCase ( input_data ):
    """NestedDataStructure should expose the wrapped object unchanged via `.data`.

    Fix: the body referenced the undefined names ``output_data``/``input_data``
    while the parameter carried an obfuscated name that also no longer matched
    the parametrize argname required by pytest.
    """
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
    "data, expected_output" , [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ] , )
def __lowerCAmelCase ( data , expected_output ):
    """NestedDataStructure.flatten should collapse arbitrarily nested lists/dicts
    into a flat list of leaves.

    Fix: the original declared two parameters with the same obfuscated name
    (a SyntaxError); names are restored to match the parametrize argnames.
    """
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def __lowerCAmelCase ( ):
    """asdict should recurse into dataclasses nested in dicts/lists, and raise
    on a non-dataclass top-level input.

    NOTE(review): `A` is the obfuscated dataclass (renamed `_snake_case` above,
    with its two fields collapsed into one), and `expected_output` /
    `SCREAMING_SNAKE_CASE_` are unbound — the original names must be restored.
    """
    lowercase__ = A(x=1 , y="foobar" )
    lowercase__ = {"x": 1, "y": "foobar"}
    assert asdict(SCREAMING_SNAKE_CASE_ ) == expected_output
    lowercase__ = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]}
    lowercase__ = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(SCREAMING_SNAKE_CASE_ ) == expected_output
    with pytest.raises(SCREAMING_SNAKE_CASE_ ):
        asdict([1, A(x=10 , y="foo" )] )
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
return text.split()
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def __lowerCAmelCase ( ):
    """iflatmap_unordered should flatten worker outputs, work with both stdlib
    and pathos pools, and yield items as soon as they are produced.

    NOTE(review): obfuscation replaced the `pool` argument and the worker
    function references with unbound names (`SCREAMING_SNAKE_CASE_`,
    `_split_text`, `_aseconds_generator_of_aitems_with_timing`) — the calls
    below cannot run until those bindings are restored.
    """
    with Pool(2 ) as pool:
        lowercase__ = list(iflatmap_unordered(SCREAMING_SNAKE_CASE_ , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(SCREAMING_SNAKE_CASE_ ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        lowercase__ = list(iflatmap_unordered(SCREAMING_SNAKE_CASE_ , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(SCREAMING_SNAKE_CASE_ ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        lowercase__ = []
        for yield_time, content in iflatmap_unordered(
            SCREAMING_SNAKE_CASE_ , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            # Each item must arrive essentially immediately after it was yielded.
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(SCREAMING_SNAKE_CASE_ )
        assert out.count("a" ) == 2
        assert out.count("b" ) == 2
        assert len(SCREAMING_SNAKE_CASE_ ) == 4
| 413 | 0 |
def _snake_case ( __snake_case , __snake_case ) -> float:
'''simple docstring'''
if digit_amount > 0:
return round(number - int(__snake_case ) , __snake_case )
return number - int(__snake_case )
if __name__ == "__main__":
    # Fix: this demo called the undefined name `decimal_isolate`; the module
    # defines the helper as `_snake_case` (obfuscated rename).
    print(_snake_case(1.5_3, 0))
    print(_snake_case(3_5.3_4_5, 1))
    print(_snake_case(3_5.3_4_5, 2))
    print(_snake_case(3_5.3_4_5, 3))
    print(_snake_case(-1_4.7_8_9, 3))
    print(_snake_case(0, 2))
    print(_snake_case(-1_4.1_2_3, 1))
    print(_snake_case(-1_4.1_2_3, 2))
    print(_snake_case(-1_4.1_2_3, 3))
| 455 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
# Require at least one deep-learning backend before the CLI can be used.
if not is_tf_available() and not is_torch_available():
    raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
__lowerCamelCase = False
__lowerCamelCase = False
def _snake_case ( __snake_case ) -> Any:
    """Factory used by argparse's `set_defaults(func=...)`: build the training
    command from parsed CLI arguments.

    Fix: the original returned the undefined name ``TrainCommand``; the command
    class in this module was renamed to ``snake_case_`` by obfuscation.
    """
    return snake_case_(__snake_case )
class snake_case_ (lowercase__ ):
    """`transformers-cli train`: train a pipeline on a CSV dataset.

    NOTE(review): obfuscation collapsed distinct locals/attributes into
    `UpperCAmelCase_`, so names such as `parser`, `train_parser`,
    `self.framework`, `self.pipeline`, `self.train_dataset`, etc. are read but
    never bound — the original assignments must be restored before this runs.
    """
    @staticmethod
    def A_ ( lowercase):
        """Register the `train` sub-command and all of its CLI options."""
        UpperCAmelCase_ : str = parser.add_parser("train" ,help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data" ,type=lowercase ,required=lowercase ,help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." ,)
        train_parser.add_argument(
            "--column_label" ,type=lowercase ,default=0 ,help="Column of the dataset csv file with example labels.")
        train_parser.add_argument(
            "--column_text" ,type=lowercase ,default=1 ,help="Column of the dataset csv file with example texts.")
        train_parser.add_argument(
            "--column_id" ,type=lowercase ,default=2 ,help="Column of the dataset csv file with example ids.")
        train_parser.add_argument(
            "--skip_first_row" ,action="store_true" ,help="Skip the first row of the csv file (headers).")
        train_parser.add_argument("--validation_data" ,type=lowercase ,default="" ,help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split" ,type=lowercase ,default=0.1 ,help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." ,)
        train_parser.add_argument("--output" ,type=lowercase ,default="./" ,help="path to saved the trained model.")
        train_parser.add_argument(
            "--task" ,type=lowercase ,default="text_classification" ,help="Task to train the model on.")
        train_parser.add_argument(
            "--model" ,type=lowercase ,default="bert-base-uncased" ,help="Model's name or path to stored model.")
        train_parser.add_argument("--train_batch_size" ,type=lowercase ,default=32 ,help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size" ,type=lowercase ,default=64 ,help="Batch size for validation.")
        train_parser.add_argument("--learning_rate" ,type=lowercase ,default=3E-5 ,help="Learning rate.")
        train_parser.add_argument("--adam_epsilon" ,type=lowercase ,default=1E-08 ,help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=lowercase)
    def __init__( self ,lowercase):
        """Resolve the backend, build the pipeline for the task, and load datasets."""
        UpperCAmelCase_ : Optional[Any] = logging.get_logger("transformers-cli/training")
        # Prefer TensorFlow when available, otherwise PyTorch.
        UpperCAmelCase_ : List[Any] = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output ,exist_ok=lowercase)
        UpperCAmelCase_ : Union[str, Any] = args.output
        UpperCAmelCase_ : Optional[int] = args.column_label
        UpperCAmelCase_ : Dict = args.column_text
        UpperCAmelCase_ : Optional[Any] = args.column_id
        self.logger.info(F"""Loading {args.task} pipeline for {args.model}""")
        if args.task == "text_classification":
            UpperCAmelCase_ : List[Any] = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(F"""Loading dataset from {args.train_data}""")
        UpperCAmelCase_ : str = Processor.create_from_csv(
            args.train_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
        UpperCAmelCase_ : str = None
        if args.validation_data:
            self.logger.info(F"""Loading validation dataset from {args.validation_data}""")
            UpperCAmelCase_ : Union[str, Any] = Processor.create_from_csv(
                args.validation_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
        UpperCAmelCase_ : List[str] = args.validation_split
        UpperCAmelCase_ : Tuple = args.train_batch_size
        UpperCAmelCase_ : Optional[int] = args.valid_batch_size
        UpperCAmelCase_ : Dict = args.learning_rate
        UpperCAmelCase_ : List[str] = args.adam_epsilon
    def A_ ( self):
        """Dispatch to the backend-specific training routine."""
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()
    def A_ ( self):
        """PyTorch training — not implemented for this command."""
        raise NotImplementedError
    def A_ ( self):
        """TensorFlow training: fit the pipeline, then save it to the output dir."""
        self.pipeline.fit(
            self.train_dataset ,validation_data=self.valid_dataset ,validation_split=self.validation_split ,learning_rate=self.learning_rate ,adam_epsilon=self.adam_epsilon ,train_batch_size=self.train_batch_size ,valid_batch_size=self.valid_batch_size ,)
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
| 455 | 1 |
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: dict ):
    """Approximate a minimum vertex cover via maximal matching.

    Repeatedly takes an arbitrary remaining edge, adds both endpoints to the
    cover, then discards every edge adjacent to either endpoint.

    Fix: the original called an undefined ``get_edges`` and added the whole
    graph argument (not the edge endpoints) to the cover; edges are now built
    locally and the endpoints are used.
    """
    chosen_vertices = set()
    # Collect the graph's edges locally; the module's edge-helper name was
    # lost to obfuscation, so we do not rely on it here.
    edges = set()
    for from_node, to_nodes in UpperCamelCase__.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: dict ):
    """Return the set of (from_node, to_node) pairs of the adjacency mapping.

    Fix: the body read the undefined names ``graph`` and ``edges`` — both were
    collapsed by obfuscation; they now refer to the parameter and a local set.
    """
    edges = set()
    for from_node, to_nodes in UpperCamelCase__.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Example usage (the cover function is the first definition above):
    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase : List[Any] = logging.get_logger(__name__)
# NOTE(review): obfuscation renamed every module constant below to the same
# name `lowercase`, so each assignment clobbers the previous one and the
# tokenizer class's references (VOCAB_FILES_NAMES, etc.) are unresolved.
# Originally: VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES.
lowercase : str = {
    '''vocab_file''': '''vocab.txt''',
    '''merges_file''': '''bpe.codes''',
}
lowercase : List[str] = {
    '''vocab_file''': {
        '''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
        '''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
    },
    '''merges_file''': {
        '''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
        '''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
    },
}
lowercase : int = {
    '''vinai/phobert-base''': 2_56,
    '''vinai/phobert-large''': 2_56,
}
def lowerCAmelCase__ ( _a : List[str] ):
snake_case_ : str = set()
snake_case_ : str = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case_ : List[Any] = char
snake_case_ : Any = set(_a )
return pairs
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
    """PhoBERT tokenizer: BPE with a fixed vocabulary and merge codes.

    NOTE(review): automated renaming badly damaged this class. Several method
    signatures repeat the parameter name `_SCREAMING_SNAKE_CASE` — duplicate
    argument names are a SyntaxError in Python — and most locals/attributes
    were collapsed into the single name `snake_case_`, so bodies read names
    (e.g. `vocab_file`, `self.encoder`, `self.bpe_ranks`) that are never
    bound. Only documentation is added here; the original identifiers must be
    restored before this class can compile.
    """
    A : str = VOCAB_FILES_NAMES
    A : List[str] = PRETRAINED_VOCAB_FILES_MAP
    A : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , **_SCREAMING_SNAKE_CASE , ) -> int:
        # NOTE(review): originally (vocab_file, merges_file, bos/eos/sep/cls/
        # unk/pad/mask tokens); loads the vocab and the BPE merge ranks.
        super().__init__(
            bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
        snake_case_ : List[Any] = vocab_file
        snake_case_ : Any = merges_file
        snake_case_ : Any = {}
        snake_case_ : Union[str, Any] = 0
        snake_case_ : Union[str, Any] = 1
        snake_case_ : Optional[int] = 2
        snake_case_ : Optional[int] = 3
        self.add_from_file(_SCREAMING_SNAKE_CASE )
        snake_case_ : Tuple = {v: k for k, v in self.encoder.items()}
        with open(_SCREAMING_SNAKE_CASE , encoding="utf-8" ) as merges_handle:
            snake_case_ : List[Any] = merges_handle.read().split("\n" )[:-1]
        snake_case_ : Optional[int] = [tuple(merge.split()[:-1] ) for merge in merges]
        snake_case_ : List[str] = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
        snake_case_ : Optional[Any] = {}
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
        """Build model inputs: <s> A </s> (or <s> A </s></s> B </s> for pairs)."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        snake_case_ : Optional[Any] = [self.cls_token_id]
        snake_case_ : int = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
        if token_ids_a is None:
            return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
        return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
        """PhoBERT does not use token types: return all zeros of the right length."""
        snake_case_ : List[Any] = [self.sep_token_id]
        snake_case_ : Optional[int] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    @property
    def _lowerCAmelCase ( self ) -> Dict:
        """Vocabulary size."""
        return len(self.encoder )
    def _lowerCAmelCase ( self ) -> Optional[Any]:
        """Full vocab including added tokens."""
        return dict(self.encoder , **self.added_tokens_encoder )
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
        """Apply byte-pair encoding to a single token (with a per-token cache)."""
        if token in self.cache:
            return self.cache[token]
        snake_case_ : List[Any] = tuple(_SCREAMING_SNAKE_CASE )
        snake_case_ : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
        snake_case_ : str = get_pairs(_SCREAMING_SNAKE_CASE )
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked bigram until none remains in bpe_ranks.
            snake_case_ : List[str] = min(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : self.bpe_ranks.get(_SCREAMING_SNAKE_CASE , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            snake_case_ , snake_case_ : Dict = bigram
            snake_case_ : Any = []
            snake_case_ : Any = 0
            while i < len(_SCREAMING_SNAKE_CASE ):
                try:
                    snake_case_ : List[str] = word.index(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    snake_case_ : Optional[Any] = j
                if word[i] == first and i < len(_SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            snake_case_ : Union[str, Any] = tuple(_SCREAMING_SNAKE_CASE )
            snake_case_ : List[str] = new_word
            if len(_SCREAMING_SNAKE_CASE ) == 1:
                break
            else:
                snake_case_ : List[Any] = get_pairs(_SCREAMING_SNAKE_CASE )
        snake_case_ : Dict = "@@ ".join(_SCREAMING_SNAKE_CASE )
        snake_case_ : Tuple = word[:-4]
        snake_case_ : Union[str, Any] = word
        return word
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
        """Tokenize text: split on whitespace, then apply BPE to each word."""
        snake_case_ : List[Any] = []
        snake_case_ : str = re.findall(r"\S+\n?" , _SCREAMING_SNAKE_CASE )
        for token in words:
            split_tokens.extend(list(self.bpe(_SCREAMING_SNAKE_CASE ).split(" " ) ) )
        return split_tokens
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Any:
        """Token -> id (falls back to the unk token's id)."""
        return self.encoder.get(_SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> int:
        """Id -> token (falls back to the unk token)."""
        return self.decoder.get(_SCREAMING_SNAKE_CASE , self.unk_token )
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """Join tokens and undo the BPE continuation marker '@@ '."""
        snake_case_ : Any = " ".join(_SCREAMING_SNAKE_CASE ).replace("@@ " , "" ).strip()
        return out_string
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
        """Copy vocab and merges files to `save_directory`."""
        if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        snake_case_ : Optional[int] = os.path.join(
            _SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        snake_case_ : Dict = os.path.join(
            _SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
            copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
        if os.path.abspath(self.merges_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
            copyfile(self.merges_file , _SCREAMING_SNAKE_CASE )
        return out_vocab_file, out_merge_file
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """Load a fairseq-style '<token> <count>' dictionary file (path or handle)."""
        if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            try:
                with open(_SCREAMING_SNAKE_CASE , "r" , encoding="utf-8" ) as fd:
                    self.add_from_file(_SCREAMING_SNAKE_CASE )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
            return
        snake_case_ : Dict = f.readlines()
        for lineTmp in lines:
            snake_case_ : Tuple = lineTmp.strip()
            snake_case_ : Dict = line.rfind(" " )
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
            snake_case_ : Optional[Any] = line[:idx]
            snake_case_ : List[Any] = len(self.encoder )
| 568 | 0 |
from __future__ import annotations
from collections import Counter
from random import random
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] ):
SCREAMING_SNAKE_CASE = {}
def _snake_case ( self : str , __lowerCamelCase : str ):
SCREAMING_SNAKE_CASE = {}
def _snake_case ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : float ):
if nodea not in self.connections:
self.add_node(__lowerCamelCase )
if nodea not in self.connections:
self.add_node(__lowerCamelCase )
SCREAMING_SNAKE_CASE = probability
def _snake_case ( self : Optional[Any] ):
return list(self.connections )
def _snake_case ( self : Tuple , __lowerCamelCase : str ):
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def __a ( A__ : str , A__ : list[tuple[str, str, float]] , A__ : int ):
SCREAMING_SNAKE_CASE = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(A__ , A__ , A__ )
SCREAMING_SNAKE_CASE = Counter(graph.get_nodes() )
SCREAMING_SNAKE_CASE = start
for _ in range(A__ ):
SCREAMING_SNAKE_CASE = graph.transition(A__ )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod() | 716 |
import cmath
import math
def __a ( A__ : float , A__ : float , A__ : float , A__ : float ):
SCREAMING_SNAKE_CASE = math.radians(A__ )
SCREAMING_SNAKE_CASE = math.radians(A__ )
# Convert voltage and current to rectangular form
SCREAMING_SNAKE_CASE = cmath.rect(A__ , A__ )
SCREAMING_SNAKE_CASE = cmath.rect(A__ , A__ )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod() | 698 | 0 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class UpperCamelCase__ :
    """Bezier curve defined by 2-D control points (degree = n_points - 1).

    NOTE(review): obfuscation collapsed distinct locals/attributes into
    `lowerCamelCase__`, so `self.list_of_points`, `self.degree`,
    `output_values`, `to_plot_x` etc. are read but never bound — the original
    assignments must be restored before this class can run.
    """
    def __init__( self ,_lowerCAmelCase ):
        lowerCamelCase__ = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        lowerCamelCase__ = len(_lowerCAmelCase ) - 1
    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        """Evaluate the Bernstein basis polynomials at time t in [0, 1]."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        lowerCamelCase__ = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree ,_lowerCAmelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(_lowerCAmelCase ) ,5 ) == 1
        return output_values
    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        """Return the (x, y) point on the curve at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        lowerCamelCase__ = self.basis_function(_lowerCAmelCase )
        lowerCamelCase__ = 0.0
        lowerCamelCase__ = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def UpperCamelCase_ ( self ,_lowerCAmelCase = 0.01 ):
        """Plot the curve (sampled at `step_size`) together with its control points."""
        from matplotlib import pyplot as plt # type: ignore
        lowerCamelCase__ = [] # x coordinates of points to plot
        lowerCamelCase__ = [] # y coordinates of points to plot
        lowerCamelCase__ = 0.0
        while t <= 1:
            lowerCamelCase__ = self.bezier_curve_function(_lowerCAmelCase )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        lowerCamelCase__ = [i[0] for i in self.list_of_points]
        lowerCamelCase__ = [i[1] for i in self.list_of_points]
        plt.plot(
            _lowerCAmelCase ,_lowerCAmelCase ,color="""blue""" ,label="""Curve of Degree """ + str(self.degree ) ,)
        plt.scatter(_lowerCAmelCase ,_lowerCAmelCase ,color="""red""" ,label="""Control Points""" )
        plt.legend()
        plt.show()
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the demo referenced the undefined name `BezierCurve`; the class in
    # this module was renamed to `UpperCamelCase__` by obfuscation.
    UpperCamelCase__([(1, 2), (3, 5)]).plot_curve()  # degree 1
    UpperCamelCase__([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    UpperCamelCase__([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
| 50 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
# NOTE(review): obfuscation renamed every module constant below to the same
# name `_lowerCamelCase`, so each assignment clobbers the previous one and the
# class references (VOCAB_FILES_NAMES, MUSTC_LANGS, ...) are unresolved.
# Originally: SPIECE_UNDERLINE, VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# MAX_MODEL_INPUT_SIZES, MUSTC_LANGS, LANGUAGES.
_lowerCamelCase = '▁'
_lowerCamelCase = {
    'vocab_file': 'vocab.json',
    'spm_file': 'sentencepiece.bpe.model',
}
_lowerCamelCase = {
    'vocab_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
        ),
    },
    'spm_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
        )
    },
}
_lowerCamelCase = {
    'facebook/s2t-small-librispeech-asr': 1024,
}
_lowerCamelCase = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
_lowerCamelCase = {'mustc': MUSTC_LANGS}
class __A ( lowerCamelCase__ ):
"""simple docstring"""
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = MAX_MODEL_INPUT_SIZES
UpperCAmelCase__ = ["""input_ids""", """attention_mask"""]
UpperCAmelCase__ = []
    def __init__( self , a__ , a__ , a__="<s>" , a__="</s>" , a__="<pad>" , a__="<unk>" , a__=False , a__=False , a__=None , a__=None , a__ = None , **a__ , ):
        """Speech2Text tokenizer constructor.

        NOTE(review): the signature repeats the parameter name `a__`, which is
        a SyntaxError in Python — originally (vocab_file, spm_file, bos/eos/
        pad/unk tokens, do_upper_case, do_lower_case, tgt_lang, lang_codes,
        sp_model_kwargs). `load_json`/`load_spm` are defined elsewhere in the
        original module.
        """
        _lowerCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=a__ , eos_token=a__ , unk_token=a__ , pad_token=a__ , do_upper_case=a__ , do_lower_case=a__ , tgt_lang=a__ , lang_codes=a__ , sp_model_kwargs=self.sp_model_kwargs , **a__ , )
        _lowerCamelCase : Optional[int] = do_upper_case
        _lowerCamelCase : Optional[Any] = do_lower_case
        _lowerCamelCase : Tuple = load_json(a__)
        _lowerCamelCase : Union[str, Any] = {v: k for k, v in self.encoder.items()}
        _lowerCamelCase : Tuple = spm_file
        _lowerCamelCase : Any = load_spm(a__ , self.sp_model_kwargs)
        if lang_codes is not None:
            # Multilingual mode: register <lang:xx> tokens and select the target language.
            _lowerCamelCase : List[Any] = lang_codes
            _lowerCamelCase : List[str] = LANGUAGES[lang_codes]
            _lowerCamelCase : Any = [F"""<lang:{lang}>""" for lang in self.langs]
            _lowerCamelCase : Optional[Any] = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""") for lang in self.langs}
            _lowerCamelCase : List[str] = self.lang_tokens
            _lowerCamelCase : str = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            _lowerCamelCase : Any = {}
    @property
    def __snake_case ( self):
        """Vocabulary size (number of entries in the base vocab)."""
        return len(self.encoder)
    @property
    def __snake_case ( self):
        """Current target language code."""
        return self._tgt_lang
    @tgt_lang.setter
    def __snake_case ( self , a__):
        """Set a new target language and refresh the prefix tokens accordingly."""
        _lowerCamelCase : Any = new_tgt_lang
        # NOTE(review): `new_tgt_lang` is unbound (obfuscation collapsed the
        # assignment target and the parameter); originally `self._tgt_lang = a__`.
        self.set_tgt_lang_special_tokens(a__)
    def __snake_case ( self , a__):
        """Use the target language's <lang:xx> token id as the decoder prefix."""
        _lowerCamelCase : Optional[Any] = self.lang_code_to_id[tgt_lang]
        # NOTE(review): `tgt_lang`/`lang_code_id` are unbound obfuscation
        # remnants; originally this set `self.prefix_tokens = [lang_code_id]`.
        _lowerCamelCase : Any = [lang_code_id]
    def __snake_case ( self , a__):
        """Tokenize text with the SentencePiece model."""
        return self.sp_model.encode(a__ , out_type=a__)
    def __snake_case ( self , a__):
        """Token -> id (falls back to the unk token's id)."""
        return self.encoder.get(a__ , self.encoder[self.unk_token])
    def __snake_case ( self , a__):
        """Id -> token (falls back to the unk token)."""
        return self.decoder.get(a__ , self.unk_token)
    def __snake_case ( self , a__):
        """Decode a token sequence to text, keeping special tokens verbatim and
        applying upper-casing when `do_upper_case` is set."""
        _lowerCamelCase : Union[str, Any] = []
        _lowerCamelCase : List[str] = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                _lowerCamelCase : List[Any] = self.sp_model.decode(a__)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                _lowerCamelCase : Optional[int] = []
            else:
                current_sub_tokens.append(a__)
        _lowerCamelCase : Tuple = self.sp_model.decode(a__)
        out_string += decoded.upper() if self.do_upper_case else decoded
        # NOTE(review): `tokens`, `out_string`, `decoded` and
        # `current_sub_tokens` are unbound obfuscation remnants.
        return out_string.strip()
def __snake_case ( self , a__ , a__=None):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def __snake_case ( self , a__ , a__ = None , a__ = False):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__)
_lowerCamelCase : Tuple = [1] * len(self.prefix_tokens)
_lowerCamelCase : Tuple = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(a__)) + suffix_ones
return prefix_ones + ([0] * len(a__)) + ([0] * len(a__)) + suffix_ones
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.__dict__.copy()
_lowerCamelCase : str = None
return state
def __setstate__( self , a__):
"""simple docstring"""
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
_lowerCamelCase : List[Any] = {}
_lowerCamelCase : Dict = load_spm(self.spm_file , self.sp_model_kwargs)
def __snake_case ( self , a__ , a__ = None):
"""simple docstring"""
_lowerCamelCase : str = Path(a__)
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
_lowerCamelCase : Any = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
_lowerCamelCase : Optional[Any] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , a__)
if os.path.abspath(self.spm_file) != os.path.abspath(a__) and os.path.isfile(self.spm_file):
copyfile(self.spm_file , a__)
elif not os.path.isfile(self.spm_file):
with open(a__ , '''wb''') as fi:
_lowerCamelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(a__)
return (str(a__), str(a__))
def load_spm(path, sp_model_kwargs):
    """Load a SentencePiece model from `path` with the given constructor kwargs."""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path):
    """Read a JSON file and return the parsed object."""
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path):
    """Serialize `data` to `path` as pretty-printed JSON."""
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
| 114 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy-import structure: maps submodule name -> public names it exports.
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]


if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 33 |
from __future__ import annotations

from typing import Generic, TypeVar

# Generic element type stored in the disjoint-set / graph structures below.
T = TypeVar("T")
class DisjointSetTreeNode(Generic[T]):
    """A single union-find tree node: payload, parent pointer and union-by-rank rank."""

    def __init__(self, data: T) -> None:
        self.data = data
        # Every node starts as the representative of its own singleton set.
        self.parent = self
        self.rank = 0
class DisjointSetTree(Generic[T]):
    """Disjoint-set forest with path compression and union by rank."""

    def __init__(self) -> None:
        # map from node data to the node object
        self.map: dict = {}

    def make_set(self, data: T) -> None:
        """Create a new singleton set with `data` as its only member."""
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T):
        """Return the representative node of the set containing `data` (with path compression)."""
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1, node2) -> None:
        """Attach the lower-rank tree under the higher-rank one (helper for union)."""
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        """Merge the two disjoint sets containing `data1` and `data2`."""
        self.link(self.find_set(data1), self.find_set(data2))
class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph with Kruskal minimum-spanning-tree extraction."""

    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict = {}

    def add_node(self, node: T) -> None:
        """Add `node` ONLY if it is not already present in the graph."""
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        """Add an undirected edge with the given weight (nodes created on demand)."""
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self):
        """Return the minimum spanning tree as a new GraphUndirectedWeighted."""
        # Collect each undirected edge once.
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation: greedily take the lightest edge joining two components.
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
| 33 | 1 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."


# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    """Whether `line` still belongs to the scanned block: it keeps the block's
    indentation, is (near-)empty, or closes a multi-line signature (`) -> ...:`)."""
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` (dotted path) inside diffusers."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
# Matches `# Copied from diffusers.<module>.<object>` comments (indent, path, replace pattern).
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
# Matches `old->new` replacement directives appended to a Copied from comment.
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
# Matches `<FILL ...>` placeholders.
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    """Return the leading whitespace of the first non-empty line of `code` ("" if none)."""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Apply black formatting to `code`.

    Indented snippets are temporarily wrapped in a dummy class so black accepts them.
    """
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """Check that every `# Copied from` block in `filename` matches the original.

    Returns a list of ``[object_name, start_index]`` differences; with
    ``overwrite=True`` the file is rewritten in place instead.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    """Run `is_copy_consistent` on every diffusers source file.

    Raises an Exception listing all inconsistencies unless `overwrite` is True.
    """
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    # CLI entry point: check all copies, optionally rewriting mismatches in place.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
| 7 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical config URLs for pretrained Speech2Text checkpoints.
SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    """Configuration for a Speech2Text encoder-decoder model.

    Stores the architecture hyper-parameters (layer counts, dimensions,
    dropout rates, convolutional subsampler settings) and delegates the
    generic token-id / encoder-decoder options to `PretrainedConfig`.
    """

    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000,
        encoder_layers=12,
        encoder_ffn_dim=2_048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6_000,
        max_target_positions=1_024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1_024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 7 | 1 |
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Return the first non-negative integer value found among `env_keys`, else `default`."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default
def parse_flag_from_env(key, default=False):
    """Interpret the environment variable `key` as a boolean flag (missing -> `default`)."""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env(key, default="no"):
    """Return the raw string value of environment variable `key` (missing -> `default`)."""
    value = os.environ.get(key, str(default))
    return value
| 668 | """simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
# Odd composite numbers (9, 15, 21, ... up to 100,000): the candidate pool for
# Goldbach's other conjecture (Project Euler 46).
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list:
    """Return the first `n` odd composites that cannot be written as prime + 2*i*i.

    Raises ValueError when `n` is not a positive integer.
    """
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                # This candidate IS expressible as prime + twice a square.
                break
            i += 1
        else:
            # No decomposition found: this is a counterexample to the conjecture.
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []
def solution() -> int:
    """Return the smallest odd composite that is not a prime plus twice a square."""
    return compute_nums(1)[0]
if __name__ == "__main__":
    # Print the Project Euler 46 answer when run as a script.
    print(F'{solution() = }')
| 668 | 1 |
def binary_insertion_sort(collection: list) -> list:
    """Sort `collection` in place with binary insertion sort and return it.

    For each element, binary search finds its insertion point in the already
    sorted prefix; the tail of the prefix is then shifted one slot right.
    """
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        # Binary search for the insertion index of `val` in collection[:i].
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1

        # Shift elements right and drop `val` into place.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin and print it sorted.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(binary_insertion_sort(unsorted))
| 562 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
        },
    ]
)
class UpperCAmelCase(unittest.TestCase):
    """Single-node SageMaker training smoke test, parameterized per framework."""

    def setUp(self):
        # The PyTorch variant trains through the example script, which has to be
        # copied into the SageMaker test path first.
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the training-job metrics to a CSV next to the test path."""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 99_9999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 487 | 0 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_ta import TaTokenizer
else:
    # Slow tokenizer is unavailable without sentencepiece.
    TaTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
    },
}


# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}
class TaTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" T5 tokenizer (backed by HuggingFace's *tokenizers* library),
    based on SentencePiece/Unigram. Sentinel tokens `<extra_id_{i}>` are appended
    as additional special tokens.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids sentinel tokens unless the caller already supplied them.
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # The slow tokenizer can only be re-created when the SentencePiece file is known.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        """Keep the (deprecated) historical max length for known checkpoints, with a warning."""
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the SentencePiece vocab file into `save_directory` and return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Append eos after each sequence: `X </s>` or `A </s> B </s>`."""
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        # We don't expect to process pairs, but leave the pair logic for API consistency
        token_ids_1 = token_ids_1 + [self.eos_token_id]
        return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """T5 does not use token type ids; return the appropriate number of zeros."""
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        """Return the `<extra_id_N>` sentinel tokens among the additional special tokens."""
        # NOTE: the upstream `bool(...) is not None` form was always True and let
        # every additional special token through; this keeps only real sentinels.
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)), self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        """Return the ids of the sentinel tokens."""
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    """Unit tests for the 0/1 knapsack implementation."""

    def test_base_case(self):
        # Zero capacity / zero-value items must yield 0.
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
    # Run the knapsack test suite when executed directly.
    unittest.main()
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 76 | """simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """Compare two tensor protos for equality while ignoring their names."""
    name_a = a.name
    name_b = b.name

    # Blank out the names so equality only reflects the tensor contents.
    a.name = ""
    b.name = ""

    res = a == b

    # Restore the original names before returning.
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    """Replace every occurrence of input `name` by `new_name` on `node_proto`,
    recursing into If/Loop subgraphs."""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # Repeated-field protos have no item assignment: insert then pop.
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    """Replace input `name` by `new_name` in every node of `graph_proto`."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop the duplicate initializers listed in `ind_to_replace` (pairs of
    (duplicate_index, kept_index)) from `model_without_ext` and rewire all node
    inputs to reference the kept tensor."""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        # Sanity: the two models must describe the same initializers, and the
        # duplicate must come after the one we keep.
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """Deduplicate identical initializers in an ONNX model.

    Writes an `optimized_<name>` model next to the input and returns its path.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                # Estimate the bytes saved; data_type codes follow onnx.TensorProto.
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 10_24 / 10_24 / 10_24, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class __lowerCAmelCase(ProcessorMixin):
    """Wav2Vec2 processor: bundles an audio feature extractor and a CTC
    tokenizer behind one `__call__`/`pad` interface.

    Bug fixes: every method in the mangled original repeated one parameter
    name (a SyntaxError) and all methods shared a single name, overwriting
    each other; real names are restored per the visible bodies. The base
    class is `ProcessorMixin` (imported at the top of this file).
    """

    # Names ProcessorMixin uses to materialize the two sub-processors.
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Default target of __call__/pad until as_target_processor() swaps it.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        """Forward `audio` to the feature extractor and/or `text` to the
        tokenizer; when both are given, attach token ids as `labels`."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # First positional argument is treated as the audio input.
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        """Pad `input_features` via the feature extractor and/or `labels` via
        the tokenizer, mirroring the routing logic of `__call__`."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        """Delegate to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Delegate to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily route `__call__`/`pad` to the tokenizer."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
'''simple docstring'''
from collections import defaultdict
def __A ( lowerCAmelCase_ ):
    """Return the size of the subtree rooted at `lowerCAmelCase_`, marking
    nodes in the module-level `visited` dict and recording the roots of
    even-sized subtrees in the module-level `cuts` list.

    Bug fixes: the original never marked `visited[start]` (the bidirectional
    adjacency list then recurses forever) and recursed through an undefined
    global name `dfs` with the wrong argument. Recursion now goes through a
    local helper so it is immune to later rebinding of the module name.
    """
    def _dfs(start):
        ret = 1
        visited[start] = True
        for v in tree[start]:
            if v not in visited:
                ret += _dfs(v)
        if ret % 2 == 0:
            cuts.append(start)
        return ret

    return _dfs(lowerCAmelCase_)
def __A ( ):
    # Entry point of the even-tree computation: run the DFS from the root.
    # NOTE(review): `dfs` is not defined at module level — the traversal helper
    # above is also named `__A`, and this definition overwrites it, so calling
    # this function raises NameError. The two functions need distinct names
    # (file-level fix required).
    dfs(1 )
if __name__ == "__main__":
    # Bug fixes: the original used an invalid annotated tuple assignment,
    # bound every value to throwaway mangled names (so the globals `tree`,
    # `visited` and `cuts` read by the DFS were never created), and called
    # the undefined name `even_tree()`.
    n, m = 10, 9  # number of nodes / number of edges (m = n - 1 for a tree)
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        # Undirected tree: record both directions.
        tree[u].append(v)
        tree[v].append(u)
    __A()  # at this point `__A` is bound to the even_tree entry point above
    print(len(cuts) - 1)
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __a(OnnxPipelineTesterMixin, unittest.TestCase):
    """CPU fast tests for the ONNX Stable Diffusion x4 upscale pipeline.

    Bug fixes: the mangled original referenced the undefined name
    `lowerCamelCase` (restored to `None`/`True` per the call sites), all test
    methods shared one name so unittest discovered none of them, and the
    rebuilt PNDM scheduler was never assigned back to the pipeline. The base
    mixin is `OnnxPipelineTesterMixin` (imported above).
    """

    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        """Deterministic 128x128 dummy image + seeded generator for 3-step runs."""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            self.hub_checkpoint, provider="CPUExecutionProvider"
        )
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            self.hub_checkpoint, provider="CPUExecutionProvider"
        )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            self.hub_checkpoint, provider="CPUExecutionProvider"
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            self.hub_checkpoint, provider="CPUExecutionProvider"
        )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            self.hub_checkpoint, provider="CPUExecutionProvider"
        )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __a(unittest.TestCase):
    """Nightly GPU integration tests for the ONNX x4 upscale pipeline.

    Bug fixes: the mangled original dropped the assignments building the ORT
    session options and referenced the undefined name `lowerCamelCase`; test
    method names are restored so unittest discovers them.
    NOTE(review): this class re-uses the mangled name `__a` and therefore
    shadows the fast-test class above — a file-level rename is still needed.
    """

    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
'''simple docstring'''
from PIL import Image
def __magic_name__(img, level) -> Image:
    """Return a copy of `img` with its contrast adjusted by `level` using a
    per-channel PIL point transform.

    Bug fixes: the original signature repeated one parameter name (a
    SyntaxError) and the inner function's parameter did not match the name
    `c` used in its body (NameError at runtime).
    """
    # Standard contrast-correction factor; level in (-255, 259) keeps it finite.
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c) -> int:
        # Linear remap around mid-gray 128; int() truncates toward zero.
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170.
        # Bug fixes: the original bound the result to `a` but saved the
        # undefined name `cont_img`, and called the undefined `change_contrast`
        # (the function above is named `__magic_name__`).
        cont_img = __magic_name__(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece's word-boundary marker.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1_024,
    "facebook/mbart-large-cc25": 1_024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
# fmt: on

# Backward-compatible alias: the mangled original bound every constant to the
# single name `A`, whose final value was the language-code list.
A = FAIRSEQ_LANGUAGE_CODES
class __lowercase(PreTrainedTokenizer):
    """SentencePiece-based MBART tokenizer with fairseq-aligned vocabulary and
    language-code special tokens.

    Bug fixes: the mangled original repeated parameter names (a SyntaxError),
    gave every method the same name `_lowerCamelCase` (so they overwrote each
    other and `@src_lang.setter` referenced a nonexistent property), and
    called `str.replace` with a list in `convert_tokens_to_string`. Real
    tokenizer method names are restored; the base class is
    `PreTrainedTokenizer` (imported at the top of this file).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens = []
    suffix_tokens = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        # SentencePieceProcessor is not picklable: stash its serialized proto.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self):
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang):
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a 0/1 mask marking special tokens (1) vs sequence tokens (0)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        # MBART uses a single all-zeros segment, even for pairs.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by generation pipelines to prepare translation inputs."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        # Literal "▁" is the SentencePiece word-boundary marker.
        out_string = "".join(tokens).replace("▁", " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def prepare_seq2seq_batch(self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang):
        """Reset special tokens to [no prefix, eos + src_lang code as suffix]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang):
        """Reset special tokens to [no prefix, eos + tgt_lang code as suffix]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
A = 300  # TEMPERATURE (unit = K) — original name kept for compatibility
T = A  # alias used in the formula; the original referenced `T` without defining it


def __A(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
    """Return the built-in voltage (in volts) of a p-n junction.

    V_bi = (k*T/q) * ln(N_d * N_a / n_i^2); Boltzmann's constant is in J/K,
    so dividing by the electron-volt value converts joules to eV (numerically
    volts here).

    Bug fix: the original signature repeated the same parameter name three
    times, which is a SyntaxError in Python.

    Raises ValueError for non-positive concentrations or for donor/acceptor
    concentrations not exceeding the intrinsic concentration.
    """
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError("Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError("Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class _lowercase(PreTrainedTokenizerFast):
    """Fast (Rust-backed) SqueezeBERT tokenizer, WordPiece-based like BERT.

    Bug fixes: the mangled original repeated parameter names (a SyntaxError),
    gave every method the name `a` (so they overwrote each other), and the
    normalizer state was never written back before rebuilding it. The base
    class is `PreTrainedTokenizerFast` (imported at the top of this file).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer if its stored options disagree with
        # the requested ones (e.g. a checkpoint saved with different casing).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class A_(PretrainedConfig):
    """Configuration for Switch Transformers (sparse Mixture-of-Experts T5
    variant).

    Bug fix: the mangled original repeated the parameter name `lowercase__`
    for every `__init__` argument, which is a SyntaxError in Python; real
    parameter names are restored from the attribute assignments in the body.
    The base class is `PretrainedConfig` (imported at the top of this file).
    """

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32_128,
        d_model=768,
        d_kv=64,
        d_ff=2_048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        # Parse "gated-gelu" style activations into (is_gated, act_fn).
        act_info = self.feed_forward_proj.split('-')
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Return True iff the string is an isogram (no repeated letters,
    case-insensitive).

    Bug fixes: the original body referenced the undefined global `string`
    instead of its parameter, and the lower-cased copy it built was never
    used in the comparison, making the check case-sensitive.

    Raises:
        ValueError: if the input contains non-alphabetic characters.
    """
    if not all(ch.isalpha() for ch in __lowerCamelCase):
        raise ValueError("String must only contain alphabetic characters.")
    lowered = __lowerCamelCase.lower()
    # An isogram has as many distinct letters as letters in total.
    return len(lowered) == len(set(lowered))
if __name__ == "__main__":
    # Bug fixes: the original bound both values to throwaway names (so
    # `input_str` and `isogram` used below were undefined) and called the
    # undefined name `is_isogram` (the function above is `lowerCAmelCase_`).
    input_str = input("Enter a string ").strip()
    isogram = lowerCAmelCase_(input_str)
    print(f'''{input_str} is {"an" if isogram else "not an"} isogram.''')
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
_snake_case : List[str] = logging.get_logger(__name__)
class a (_lowerCAmelCase ):
    """Deprecated alias kept for backward compatibility.

    Constructing it emits a ``FutureWarning`` and otherwise forwards to the
    image-processor base class it inherits from.
    """

    def __init__( self : int , *args : Any , **kwargs : Union[str, Any] ) -> None:
        # Original signature reused one name for *args and **kwargs, which is a
        # SyntaxError, and passed the varargs as the warning category.
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 203 | 1 |
from __future__ import annotations
import random
# Maximum size of the population.  Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate ( item , main_target ):
    """Score *item* against *main_target*.

    The score is the number of positions where both strings carry the same
    character.  Returns ``(item, score)`` with the score as a float, the
    shape expected by ``select`` and ``basic``.
    """
    # Original def was renamed to `lowercase` and gave both parameters the
    # same name (SyntaxError); call sites in this file use `evaluate`.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover ( parent_a , parent_b ):
    """Slice both parents at one random index and swap the tails.

    Returns two children: ``parent_a``'s head joined to ``parent_b``'s tail,
    and vice versa.
    """
    # Original collapsed both parents into one duplicate parameter name, so
    # both "children" were copies of a single parent; restored two parents.
    random_slice = random.randint(0, len(parent_a) - 1)
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
def mutate ( child , genes ):
    """With probability MUTATION_PROBABILITY, replace one random gene of *child*.

    Returns the (possibly mutated) child string.
    """
    # Original discarded the whole child and returned a single random
    # character; a proper single-gene mutation is restored here.
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select ( parent_a , population_score , genes , ):
    """Breed up to 10 children from *parent_a*.

    The mate is drawn at random from the best-scored part of
    *population_score*; the number of children is proportional to the
    parent's (normalized) score.  Returns the list of new children.
    """
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        # NOTE(review): mate index range restored as randint(0, N_SELECTED),
        # matching the best-to-worst ordering built in `basic` — confirm.
        parent_b = population_score[random.randint(0, N_SELECTED)][0]
        child_a, child_b = crossover(parent_a[0], parent_b)
        # Append new string to the population list.
        pop.append(mutate(child_a, genes))
        pop.append(mutate(child_b, genes))
    return pop
def basic ( target , genes , debug = True ):
    """Evolve random strings until one matches *target*.

    Args:
        target: The string the population should converge to.
        genes: The alphabet of characters children may be built from.
        debug: When True, print progress every 10 generations.

    Returns:
        (generation, total_population, best_string) once a perfect match is found.

    Raises:
        ValueError: If N_POPULATION <= N_SELECTED, or *target* uses characters
            missing from *genes* (evolution could never converge).
    """
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f'''{N_POPULATION} must be bigger than {N_SELECTED}'''
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # (A thread pool could be used here, but for a simple algorithm like
        # this it would probably be slower — see the original upstream notes.)
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'''\nGeneration: {generation}'''
                f'''\nTotal Population:{total_population}'''
                f'''\nBest score: {population_score[0][1]}'''
                f'''\nBest string: {population_score[0][0]}''' )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    # Demo run: evolve random strings toward a fixed sentence and report stats.
    # (Original unpacked into a single repeated name and printed undefined
    # variables; the entry point above is `basic`.)
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
    )
| 392 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration file.
snake_case__ : Tuple = logging.get_logger(__name__)

# Map from checkpoint name to its hosted config.json.
snake_case__ : Optional[int] = {
    '''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class snake_case ( _snake_case ):
    """Configuration class for the YOLOS object-detection model.

    Stores the vision-transformer hyper-parameters plus the Hungarian-matcher
    costs and loss coefficients used by the detection head.
    """

    UpperCamelCase__ : Optional[int] = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        """Build the config; unknown kwargs are forwarded to the base class.

        NOTE(review): the original obfuscated signature reused one parameter
        name for every argument (a SyntaxError) and assigned each value to a
        throwaway local instead of an instance attribute, so the config held
        no state.  Parameter names are restored from the upstream YolosConfig
        in the same positional order as the original defaults.
        The mutable list default for ``image_size`` mirrors the upstream API
        and is kept for compatibility.
        """
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class snake_case ( _snake_case ):
    """ONNX export configuration for YOLOS.

    NOTE(review): in the obfuscated original all three properties shared the
    name ``UpperCAmelCase`` so two of them were shadowed and unreachable; they
    are renamed here to the names the transformers OnnxConfig contract reads
    (``inputs``, ``atol_for_validation``, ``default_onnx_opset``).
    """

    # Minimum torch version required to export this model to ONNX.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for the single ``pixel_values`` input."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        """Default ONNX opset version used for export."""
        return 12
| 392 | 1 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ) -> bool:
    """Return True when the number equals the sum of its proper divisors.

    Classic perfect-number test: 6 -> 1 + 2 + 3 == 6.  (The original
    ``-> List[str]`` annotation was wrong and referenced an unimported name,
    which raises NameError at import time.)
    """
    return sum(i for i in range(1, _lowerCAmelCase // 2 + 1) if _lowerCAmelCase % i == 0) == _lowerCAmelCase
if __name__ == "__main__":
    # Interactive driver for the perfect-number check above (the function is
    # named `A_`; the original called an undefined `perfect`).
    print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
    print(f'''{number} is {'' if A_(number) else 'not '}a Perfect Number.''')
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
    """Builds tiny Albert configs and random inputs for the Flax model tests.

    NOTE(review): the obfuscated original reused one parameter name for every
    ``__init__`` argument (a SyntaxError), bound every local to the same
    throwaway name, and gave both helper methods the same name even though
    one calls ``self.prepare_config_and_inputs()``.  Names are restored from
    the upstream test file, keeping the original default values in order.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        # Store every knob on the instance so the helpers below can read them.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Create a tiny AlbertConfig plus random ids/masks for one batch."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Repackage the inputs as the dict the shared test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
    """Runs the shared Flax test-suite over every Albert model head.

    NOTE(review): the original class attribute was named ``lowerCAmelCase_``
    while the method body reads ``self.all_model_classes``, and neither method
    was named ``setUp``/``test_*`` so unittest never invoked them; names are
    restored accordingly.
    """

    # The duplicate FlaxAlbertForQuestionAnswering entry from the original
    # tuple has been dropped — it only made the smoke test run twice.
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        """Create the shared model tester before each test."""
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Smoke-test that every head loads from the hub checkpoint and runs."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
    """Integration test comparing FlaxAlbertModel output to reference values."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        """Check the shape and a 3x3x3 slice of the base model's hidden state.

        NOTE(review): the original method name was not ``test_*`` so unittest
        never discovered it, and its locals were mangled; restored here.
        """
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
# Dependency-guarded exports for the Stable Diffusion ControlNet pipelines.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


try:
    # The PyTorch pipelines need both `transformers` and `torch`.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to dummy placeholders that raise an informative error on use.
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    # The Flax pipeline additionally requires JAX/Flax to be installed.
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 35 |
from __future__ import annotations
from typing import Any
class lowercase :
    """Undirected weighted graph with Borůvka's minimum-spanning-tree search.

    NOTE(review): in the obfuscated original all five methods shared the name
    ``lowercase__`` (only the last survived) while bodies call
    ``self.find_component`` / ``self.set_component`` / ``self.union``, and
    state was written to throwaway locals instead of ``self.m_*``; both are
    restored here.
    """

    def __init__(self, num_of_nodes: int):
        # Number of nodes; edges as [u, v, weight]; node -> component root.
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int):
        """Record an undirected edge (u, v) with the given weight."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int):
        """Follow parent pointers to the root of *u_node*'s component."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int):
        """Flatten the component map after a merge rooted away from *u_node*."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int):
        """Merge the two components, attaching the smaller under the larger."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def find_minimum_spanning_tree(self):
        """Run Borůvka's algorithm, printing each added edge and the total weight."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            # For every edge, remember the cheapest edge leaving each component.
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            # Add every component's cheapest outgoing edge to the tree.
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""")
def a ( ) -> None:
    """Placeholder entry point; this module is exercised via its doctests."""


if __name__ == "__main__":
    # Execute any doctests embedded in the module when run directly.
    import doctest

    doctest.testmod()
| 35 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ):
    """Tiny config subclass used by the auto-class registration tests below."""

    # NOTE(review): this attribute appears to stand in for `model_type` =
    # "new-model" used by the auto-mapping — confirm against the base class.
    lowerCamelCase_ :int = '''new-model'''
if is_tf_available():

    class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ):
        """TF model stub paired with the config above, registered in the tests."""

        # NOTE(review): `NewModelConfig` is not defined under this name after
        # the automated rename of the class above — confirm before running.
        lowerCamelCase_ :Optional[Any] = NewModelConfig
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Tests for the TF auto-model factories (TFAutoModel and task heads).

    NOTE(review): an automated rewrite collapsed every local variable to
    ``UpperCAmelCase_``, renamed every method to ``_UpperCamelCase`` (so the
    later definitions shadow the earlier ones and unittest discovers none of
    them), and replaced most arguments with the undefined name
    ``snake_case_``.  The code is left byte-identical here — only
    documentation has been added — and should be reconciled against the
    upstream ``tests/models/auto/test_modeling_tf_auto.py`` before use.
    """

    @slow
    def _UpperCamelCase ( self ):
        """Round-trip: AutoConfig then TFAutoModel from a BERT checkpoint."""
        UpperCAmelCase_ : Union[str, Any] = 'bert-base-cased'
        UpperCAmelCase_ : Optional[int] = AutoConfig.from_pretrained(snake_case_ )
        self.assertIsNotNone(snake_case_ )
        self.assertIsInstance(snake_case_ , snake_case_ )
        UpperCAmelCase_ : List[Any] = TFAutoModel.from_pretrained(snake_case_ )
        self.assertIsNotNone(snake_case_ )
        self.assertIsInstance(snake_case_ , snake_case_ )

    @slow
    def _UpperCamelCase ( self ):
        """Load a pre-training head through TFAutoModelForPreTraining."""
        UpperCAmelCase_ : Any = 'bert-base-cased'
        UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(snake_case_ )
        self.assertIsNotNone(snake_case_ )
        self.assertIsInstance(snake_case_ , snake_case_ )
        UpperCAmelCase_ : Tuple = TFAutoModelForPreTraining.from_pretrained(snake_case_ )
        self.assertIsNotNone(snake_case_ )
        self.assertIsInstance(snake_case_ , snake_case_ )

    @slow
    def _UpperCamelCase ( self ):
        """Load causal-LM heads for the first GPT-2 checkpoint in the archive list."""
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ : Optional[Any] = AutoConfig.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )
            self.assertIsInstance(snake_case_ , snake_case_ )
            UpperCAmelCase_ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained(snake_case_ )
            UpperCAmelCase_ , UpperCAmelCase_ : Tuple = TFAutoModelForCausalLM.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
            self.assertIsNotNone(snake_case_ )
            self.assertIsInstance(snake_case_ , snake_case_ )

    @slow
    def _UpperCamelCase ( self ):
        """Load an LM head through the legacy TFAutoModelWithLMHead entry point."""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ : int = AutoConfig.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )
            self.assertIsInstance(snake_case_ , snake_case_ )
            UpperCAmelCase_ : Optional[int] = TFAutoModelWithLMHead.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )
            self.assertIsInstance(snake_case_ , snake_case_ )

    @slow
    def _UpperCamelCase ( self ):
        """Load masked-LM heads, also checking the loading-info variant."""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ : List[Any] = AutoConfig.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )
            self.assertIsInstance(snake_case_ , snake_case_ )
            UpperCAmelCase_ : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(snake_case_ )
            UpperCAmelCase_ , UpperCAmelCase_ : str = TFAutoModelForMaskedLM.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
            self.assertIsNotNone(snake_case_ )
            self.assertIsInstance(snake_case_ , snake_case_ )

    @slow
    def _UpperCamelCase ( self ):
        """Load seq2seq LM heads for the first T5 checkpoint."""
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ : List[Any] = AutoConfig.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )
            self.assertIsInstance(snake_case_ , snake_case_ )
            UpperCAmelCase_ : str = TFAutoModelForSeqaSeqLM.from_pretrained(snake_case_ )
            UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
            self.assertIsNotNone(snake_case_ )
            self.assertIsInstance(snake_case_ , snake_case_ )

    @slow
    def _UpperCamelCase ( self ):
        """Load a sequence-classification head from bert-base-uncased."""
        for model_name in ["bert-base-uncased"]:
            UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )
            self.assertIsInstance(snake_case_ , snake_case_ )
            UpperCAmelCase_ : int = TFAutoModelForSequenceClassification.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )
            self.assertIsInstance(snake_case_ , snake_case_ )

    @slow
    def _UpperCamelCase ( self ):
        """Load a question-answering head from bert-base-uncased."""
        for model_name in ["bert-base-uncased"]:
            UpperCAmelCase_ : List[Any] = AutoConfig.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )
            self.assertIsInstance(snake_case_ , snake_case_ )
            UpperCAmelCase_ : Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )
            self.assertIsInstance(snake_case_ , snake_case_ )

    @slow
    @require_tensorflow_probability
    def _UpperCamelCase ( self ):
        """Load a table-QA (TAPAS) head; needs tensorflow_probability."""
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            UpperCAmelCase_ : Tuple = AutoConfig.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )
            self.assertIsInstance(snake_case_ , snake_case_ )
            UpperCAmelCase_ : int = TFAutoModelForTableQuestionAnswering.from_pretrained(snake_case_ )
            UpperCAmelCase_ , UpperCAmelCase_ : str = TFAutoModelForTableQuestionAnswering.from_pretrained(
                snake_case_ , output_loading_info=snake_case_ )
            self.assertIsNotNone(snake_case_ )
            self.assertIsInstance(snake_case_ , snake_case_ )

    def _UpperCamelCase ( self ):
        """Check parameter counting on the tiny test model (14410 params)."""
        UpperCAmelCase_ : Any = TFAutoModelWithLMHead.from_pretrained(snake_case_ )
        self.assertIsInstance(snake_case_ , snake_case_ )
        self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
        self.assertEqual(model.num_parameters(only_trainable=snake_case_ ) , 1_4_4_1_0 )

    def _UpperCamelCase ( self ):
        """Same parameter-count check via the identifier-based code path."""
        UpperCAmelCase_ : int = TFAutoModelWithLMHead.from_pretrained(snake_case_ )
        self.assertIsInstance(snake_case_ , snake_case_ )
        self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
        self.assertEqual(model.num_parameters(only_trainable=snake_case_ ) , 1_4_4_1_0 )

    def _UpperCamelCase ( self ):
        """Architecture override: from_config honors the `architectures` field."""
        UpperCAmelCase_ : str = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
        self.assertIsInstance(snake_case_ , snake_case_ )
        UpperCAmelCase_ : List[Any] = copy.deepcopy(model.config )
        UpperCAmelCase_ : Union[str, Any] = ['FunnelBaseModel']
        UpperCAmelCase_ : Union[str, Any] = TFAutoModel.from_config(snake_case_ )
        self.assertIsInstance(snake_case_ , snake_case_ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(snake_case_ )
            UpperCAmelCase_ : Optional[Any] = TFAutoModel.from_pretrained(snake_case_ )
            self.assertIsInstance(snake_case_ , snake_case_ )

    def _UpperCamelCase ( self ):
        """Register a custom config/model pair with every auto class, then clean up."""
        try:
            AutoConfig.register('new-model' , snake_case_ )
            UpperCAmelCase_ : Tuple = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__ ):
                    # Wrong config class will raise an error
                    with self.assertRaises(snake_case_ ):
                        auto_class.register(snake_case_ , snake_case_ )
                    auto_class.register(snake_case_ , snake_case_ )
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(snake_case_ ):
                        auto_class.register(snake_case_ , snake_case_ )
                    # Now that the config is registered, it can be used as any other config with the auto-API
                    UpperCAmelCase_ : int = BertModelTester(self ).get_config()
                    UpperCAmelCase_ : Any = NewModelConfig(**tiny_config.to_dict() )
                    UpperCAmelCase_ : List[Any] = auto_class.from_config(snake_case_ )
                    self.assertIsInstance(snake_case_ , snake_case_ )
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(snake_case_ )
                        UpperCAmelCase_ : Optional[int] = auto_class.from_pretrained(snake_case_ )
                        self.assertIsInstance(snake_case_ , snake_case_ )
        finally:
            # Always undo the registrations so other tests see a clean mapping.
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def _UpperCamelCase ( self ):
        """An invalid repo id produces a helpful error message."""
        with self.assertRaisesRegex(
            snake_case_ , 'bert-base is not a local folder and is not a valid model identifier' ):
            UpperCAmelCase_ : str = TFAutoModel.from_pretrained('bert-base' )

    def _UpperCamelCase ( self ):
        """An invalid revision produces a helpful error message."""
        with self.assertRaisesRegex(
            snake_case_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            UpperCAmelCase_ : str = TFAutoModel.from_pretrained(snake_case_ , revision='aaaaaa' )

    def _UpperCamelCase ( self ):
        """A repo without model weights produces a helpful error message."""
        with self.assertRaisesRegex(
            snake_case_ , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
            UpperCAmelCase_ : List[str] = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )

    def _UpperCamelCase ( self ):
        """A PyTorch-only repo tells the user to pass from_pt=True."""
        with self.assertRaisesRegex(snake_case_ , 'Use `from_pt=True` to load this model' ):
            UpperCAmelCase_ : str = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )

    def _UpperCamelCase ( self ):
        """A second from_pretrained hits the cache: only one HEAD request."""
        UpperCAmelCase_ : List[str] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
        with RequestCounter() as counter:
            UpperCAmelCase_ : List[str] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
            self.assertEqual(counter.get_request_count , 0 )
            self.assertEqual(counter.head_request_count , 1 )
            self.assertEqual(counter.other_request_count , 0 )

        # With a sharded checkpoint
        UpperCAmelCase_ : Union[str, Any] = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
        with RequestCounter() as counter:
            UpperCAmelCase_ : int = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
            self.assertEqual(counter.get_request_count , 0 )
            self.assertEqual(counter.head_request_count , 1 )
            self.assertEqual(counter.other_request_count , 0 )
| 389 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
snake_case__ : Dict = logging.get_logger(__name__)

# Map from checkpoint name to its hosted config.json.
snake_case__ : int = {
    '''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ):
    """Configuration for the MGP-STR scene-text-recognition model."""

    # NOTE(review): this attribute appears to play the role of `model_type`.
    lowerCamelCase_ :Optional[int] = '''mgp-str'''

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_aa_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        """Build the config; unknown kwargs are forwarded to the base class.

        NOTE(review): the original obfuscated signature reused one parameter
        name for every argument (a SyntaxError) and assigned each value to a
        throwaway local, so no attribute was ever set on the instance.  The
        parameter names are restored from the attribute names visible in the
        original body, in the same positional order as the original defaults.
        The mutable list default for ``image_size`` mirrors the upstream API.
        """
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_aa_attentions = output_aa_attentions
        self.initializer_range = initializer_range
| 389 | 1 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def A ( ) -> None:
    """Entry point for the ``transformers-cli`` tool.

    Builds the argument parser, registers every sub-command on its subparser
    group, parses argv, and runs the selected command's service object.
    (The original bound the parser to a throwaway local while still reading
    ``parser``, and registered the commands with an undefined name.)
    """
    parser = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""transformers-cli command helpers""" )

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
    # The CLI entry point in this module is `A` (the original called an
    # undefined `main`).
    A()
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCAmelCase(metaclass=DummyObject):
    """Import-time placeholder for an object that needs `torch` and `scipy`.

    Any attribute access or instantiation raises a helpful "please install
    torch/scipy" error via `requires_backends`.

    NOTE(review): the original block referenced an undefined name as its
    metaclass and had two identically-named classmethods; `_backends`,
    `from_config` and `from_pretrained` follow the standard transformers
    dummy-object template — confirm against the auto-generated dummy file.
    """

    # Read by the DummyObject metaclass to build the error message.
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 103 | 0 |
'''simple docstring'''
def simplify(current_set: list[list]) -> list[list]:
    """One Gaussian-elimination pass over an augmented coefficient matrix.

    Normalises each row by its leading coefficient, subtracts the first row
    from the others to cancel the first column, then recurses on the reduced
    sub-system until rows have length 3 (two unknowns + constant).
    """
    # Divide each row by the magnitude of its first term --> creates a "unit" matrix.
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude

    # Subtract the normalised first row to cancel the leading term of every other row.
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If the first term is 0, it is already in the form we want, so preserve it.
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)

    # Create the next recursion iteration set (strip the cancelled column, recurse, restore).
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Solve n simultaneous linear equations given as n rows of n+1 coefficients.

    Each row is ``[a1, a2, ..., an, constant]``. Returns the solution vector
    rounded to 5 decimal places.

    >>> solve_simultaneous([[1, 2, 3], [4, 5, 6]])
    [-1.0, 2.0]
    >>> solve_simultaneous([[4, 2]])
    [0.5]

    Raises:
        IndexError: if the matrix is empty or not n x (n+1).
        ValueError: on non-numeric entries, or when every equation contains a 0
            coefficient (no "full" pivot row available).
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]

    data_set = equations.copy()
    # Move a row without any zero coefficient to the front so normalisation never divides by 0.
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)

    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    # Back-substitute from the most-reduced row upward.
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)

    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


# Backward-compatible alias for the previous public name of the solver.
lowerCAmelCase_ = solve_simultaneous
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # 5x5 demo system: each equation is sum(vars) + one extra copy of a variable.
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(lowerCAmelCase_(eq))
    print(lowerCAmelCase_([[4, 2]]))
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    """FiLM-conditioned T5-style decoder.

    Maps noisy continuous decoder inputs (e.g. spectrogram frames) plus one or
    more encoder outputs to a denoised continuous prediction.

    NOTE(review): parameter and attribute names were reconstructed from how
    `forward` reads `self.config` and the module attributes — the obfuscated
    original reused a single name for every parameter and dropped the `self.`
    on attribute assignments, so it could not parse; confirm against upstream.
    """

    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        # Projects the diffusion timestep embedding to a 4*d_model FiLM conditioning vector.
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        # Absolute positions are fixed, not learned.
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = TaLayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        """Build a broadcastable cross-attention mask from 1-D query and key masks."""
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    """One FiLM-conditioned T5 decoder block.

    Sub-layers, in order: conditional self-attention, (optional) cross-attention
    over encoder states, and a FiLM-conditioned feed-forward block.
    """

    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            # Convert the {0, 1} key mask into an additive logits bias (0 = keep, -1e10 = drop).
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    """Pre-norm self-attention with optional FiLM conditioning on the normed input."""

    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        # Residual connection around the (dropped-out) attention output.
        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
class TaLayerCrossAttention(nn.Module):
    """Pre-norm cross-attention over encoder key/value states with residual connection."""

    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            # Drop the broadcast head dim added by encoder_decoder_mask.
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    """Pre-norm gated feed-forward block with optional FiLM conditioning and residual."""

    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    """T5-style gated feed-forward: GELU(wi_0(x)) * wi_1(x), dropout, project back with wo."""

    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        # Gate the non-linear branch with the linear branch.
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    """T5-style RMS layer norm: scale-only, no mean subtraction and no bias."""

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Accumulate variance in float32 for numerical stability under half precision.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    """Tanh approximation of the GELU activation (the "new GELU" used by Google BERT/T5)."""

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    """Feature-wise Linear Modulation: predicts per-channel scale and shift from a conditioning embedding."""

    def __init__(self, in_features, out_features):
        super().__init__()
        # Single projection producing [scale, shift] stacked on the last axis.
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase_ ( unittest.TestCase ):
    """Slow integration check for the TensorFlow mT5 model."""

    @slow
    def snake_case_ ( self ) -> Union[str, Any]:
        """Compute mT5-small's seq2seq loss on a tiny example and compare to a reference score."""
        model = TFAutoModelForSeqaSeqLM.from_pretrained('google/mt5-small')
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')

        input_ids = tokenizer('Hello there', return_tensors='tf').input_ids
        labels = tokenizer('Hi I am', return_tensors='tf').input_ids

        # Teacher-forced loss, averaged and negated to match the reference convention.
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        expected_score = -21.228168
        self.assertTrue(abs(mtf_score - expected_score) < 2E-4)
| 447 |
'''simple docstring'''
def solution(SCREAMING_SNAKE_CASE: int = 1000) -> int:
    """Return the maximum product a*b*c over Pythagorean triplets with a+b+c == n.

    Args:
        SCREAMING_SNAKE_CASE: the required perimeter n (default 1000, Project Euler 9).

    Returns:
        The largest product a*b*c, or -1 when no triplet exists for this perimeter.
    """
    perimeter = SCREAMING_SNAKE_CASE
    best_product = -1
    for a in range(1, perimeter // 3):
        # Solve a^2 + b^2 = c^2 and a + b + c = n for b, eliminating c.
        b = (perimeter * perimeter - 2 * a * perimeter) // (2 * perimeter - 2 * a)
        c = perimeter - a - b
        if c * c == a * a + b * b:
            best_product = max(best_product, a * b * c)
    return best_product


# Backward-compatible alias for the previous public name of this function.
lowerCamelCase__ = solution


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 447 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __SCREAMING_SNAKE_CASE(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test-suite for DeBERTa (slow and fast tokenizers).

    NOTE(review): the obfuscated original named every method identically (so
    only the last survived) and used an undefined base class; method names
    below follow the standard TokenizerTesterMixin contract — confirm against
    the upstream test file.
    """

    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
                'input_ids': [
                    [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2] + [0] * 25,
                    [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2] + [0] * 35,
                    [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
                ],
                'token_type_ids': [[0] * 45, [0] * 45, [0] * 45],
                'attention_mask': [[1] * 20 + [0] * 25, [1] * 10 + [0] * 35, [1] * 45],
            }
            # fmt: on

            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 121 | """simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

# Registry of available formatters, the alias -> canonical-name table, and the
# table of errors to raise for formats whose backend is not installed.
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter subclass under `format_type` and each of its aliases.

    Overwriting an existing registration is allowed but logged as a warning.
    """
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    # The canonical name is also registered as an alias of itself.
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Record the error to raise when a format whose backend is not installed is requested."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

# Backend-specific formatters are imported lazily; when the backend is missing
# we register the error to raise instead.
if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """Resolve an alias (e.g. "pt") to its canonical format type (e.g. "torch").

    Unknown names are returned unchanged so the caller can report them.
    """
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Instantiate the Formatter registered for `format_type` (or one of its aliases).

    Raises:
        The recorded backend-missing error when the format's backend is not installed,
        or ValueError for a completely unknown format type.
    """
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'"
        )
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the RoFormer model: maps submodule name -> public names.
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so the heavy submodules are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 697 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=7 , UpperCAmelCase__=3 , UpperCAmelCase__=18 , UpperCAmelCase__=30 , UpperCAmelCase__=400 , UpperCAmelCase__=True , UpperCAmelCase__=None , UpperCAmelCase__=True , ) -> List[Any]:
a_ = size if size is not None else {'height': 18, 'width': 18}
a_ = parent
a_ = batch_size
a_ = num_channels
a_ = image_size
a_ = min_resolution
a_ = max_resolution
a_ = do_resize
a_ = size
a_ = apply_ocr
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _snake_case ( snake_case , unittest.TestCase ):
    """Tests for the LayoutLMv3 image processor (resize + optional Tesseract OCR).

    NOTE(review): obfuscation artifacts in this class —
    * every method is named ``__SCREAMING_SNAKE_CASE`` (name-mangled, and each
      def shadows the previous one); the originals were presumably ``setUp``,
      the ``image_processor_dict`` property and individual ``test_*`` methods;
    * ``LayoutLMvaImageProcessingTester`` is referenced below but the tester
      class above was renamed ``_snake_case`` — confirm against upstream;
    * several locals were collapsed to ``a_``, leaving later references
      (``image_processing``, ``image_inputs``, ``encoding`` ...) undefined;
    * the base class ``snake_case`` is presumably the saving-test mixin
      imported at the top of the file — verify.
    """
    # Class under test; None (tests skipped) when pytesseract is unavailable.
    _UpperCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    # Originally setUp: builds the hyper-parameter tester.
    # NOTE(review): the instance is bound to a throwaway local `a_` instead of
    # `self.image_processor_tester`, which later methods read — verify.
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        a_ = LayoutLMvaImageProcessingTester(self )
    # Originally the `image_processor_dict` property used to build processors.
    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        return self.image_processor_tester.prepare_image_processor_dict()
    # Checks the processor exposes the expected configuration attributes.
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        a_ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , 'apply_ocr' ) )
    # Checks `from_dict`, including the size override via kwargs.
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        a_ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
        a_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
    # Intentionally empty placeholder test.
    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        pass
    # PIL input: verifies output pixel_values shape (batched and not batched)
    # and that OCR words/boxes are returned.
    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        # Initialize image_processing
        a_ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , Image.Image )
        # Test not batched input
        a_ = image_processing(image_inputs[0] , return_tensors='pt' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        self.assertIsInstance(encoding.words , UpperCAmelCase__ )
        self.assertIsInstance(encoding.boxes , UpperCAmelCase__ )
        # Test batched
        a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
    # NumPy input: same shape checks as the PIL variant.
    def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
        # Initialize image_processing
        a_ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
        # Test not batched input
        a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
    # PyTorch-tensor input: same shape checks as the PIL variant.
    def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        # Initialize image_processing
        a_ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
        # Test not batched input
        a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
    # Integration test: runs real Tesseract OCR on a DocVQA fixture and
    # compares words/boxes against reference output, then re-runs with OCR off.
    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        # with apply_OCR = True
        a_ = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        a_ = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
        a_ = Image.open(ds[0]['file'] ).convert('RGB' )
        a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        a_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
        a_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], 
        [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , UpperCAmelCase__ )
        self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
        # with apply_OCR = False
        a_ = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
        a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 697 | 1 |
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __A( __lowerCamelCase ):
    """Helper that builds small DeBERTa-v2 configs/inputs and runs per-head
    shape checks for the model tests below.

    NOTE(review): obfuscation artifacts throughout this class —
    * every ``def`` repeats the parameter name ``SCREAMING_SNAKE_CASE_``
      (duplicate argument names are a ``SyntaxError`` in Python);
    * most locals were collapsed to a single name ``UpperCamelCase__``, so
      later references (``result``, ``input_ids`` in ``return`` statements,
      keyword values in model calls) are undefined;
    both must be restored from the upstream test file before this can run.
    """
    def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="None" , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ):
        # NOTE(review): each assignment rebinds the same local; upstream these
        # are `self.parent = parent`, `self.batch_size = batch_size`, etc.
        UpperCamelCase__ = parent
        UpperCamelCase__ = batch_size
        UpperCamelCase__ = seq_length
        UpperCamelCase__ = is_training
        UpperCamelCase__ = use_input_mask
        UpperCamelCase__ = use_token_type_ids
        UpperCamelCase__ = use_labels
        UpperCamelCase__ = vocab_size
        UpperCamelCase__ = hidden_size
        UpperCamelCase__ = num_hidden_layers
        UpperCamelCase__ = num_attention_heads
        UpperCamelCase__ = intermediate_size
        UpperCamelCase__ = hidden_act
        UpperCamelCase__ = hidden_dropout_prob
        UpperCamelCase__ = attention_probs_dropout_prob
        UpperCamelCase__ = max_position_embeddings
        UpperCamelCase__ = type_vocab_size
        UpperCamelCase__ = type_sequence_label_size
        UpperCamelCase__ = initializer_range
        UpperCamelCase__ = num_labels
        UpperCamelCase__ = num_choices
        UpperCamelCase__ = relative_attention
        UpperCamelCase__ = position_biased_input
        UpperCamelCase__ = pos_att_type
        UpperCamelCase__ = scope
    # Builds random input ids / masks / labels plus a config.
    def UpperCAmelCase_ (self ):
        UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase__ = None
        if self.use_input_mask:
            UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        UpperCamelCase__ = None
        if self.use_token_type_ids:
            UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCamelCase__ = None
        UpperCamelCase__ = None
        UpperCamelCase__ = None
        if self.use_labels:
            UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
        UpperCamelCase__ = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    # Small DeBERTa-v2 config derived from the tester's hyper-parameters.
    def UpperCAmelCase_ (self ):
        return DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    # Loss must be a scalar (empty shape).
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
        self.parent.assertListEqual(list(result.loss.size() ) , [] )
    # Base model: checks hidden-state output shape for three call variants.
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
        UpperCamelCase__ = DebertaVaModel(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )[0]
        UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )[0]
        UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
    # Masked-LM head: logits shape (batch, seq, vocab).
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
        UpperCamelCase__ = DebertaVaForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    # Sequence-classification head: logits shape (batch, num_labels) + loss.
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
        UpperCamelCase__ = self.num_labels
        UpperCamelCase__ = DebertaVaForSequenceClassification(SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(SCREAMING_SNAKE_CASE_ )
    # Token-classification head: logits shape (batch, seq, num_labels).
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
        UpperCamelCase__ = self.num_labels
        UpperCamelCase__ = DebertaVaForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    # QA head: start/end logits shape (batch, seq).
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
        UpperCamelCase__ = DebertaVaForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        UpperCamelCase__ = model(
            SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    # Multiple-choice head: inputs expanded to (batch, num_choices, seq);
    # logits shape (batch, num_choices).
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
        UpperCamelCase__ = DebertaVaForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        UpperCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        UpperCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        UpperCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        UpperCamelCase__ = model(
            SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    # Repackages prepare_config_and_inputs() into the common (config, dict) form.
    def UpperCAmelCase_ (self ):
        UpperCamelCase__ = self.prepare_config_and_inputs()
        # NOTE(review): this tuple-unpack rebinds one name seven times; upstream
        # it unpacks into config, input_ids, token_type_ids, input_mask, ...
        (
            (
                UpperCamelCase__
            ) , (
                UpperCamelCase__
            ) , (
                UpperCamelCase__
            ) , (
                UpperCamelCase__
            ) , (
                UpperCamelCase__
            ) , (
                UpperCamelCase__
            ) , (
                UpperCamelCase__
            ) ,
        ) = config_and_inputs
        UpperCamelCase__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class __A( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
    """Standard per-head DeBERTa-v2 model tests driven by the tester above.

    NOTE(review): every test method below was renamed to ``UpperCAmelCase_``
    by the obfuscation, so at class-creation time each def shadows the
    previous one and only the last (the slow from_pretrained check) survives —
    upstream these are distinct ``test_*`` methods; confirm before relying on
    this class.
    """
    # All model classes exercised by the common ModelTesterMixin machinery.
    SCREAMING_SNAKE_CASE__ = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping for the pipeline mixin.
    SCREAMING_SNAKE_CASE__ = (
        {
            """feature-extraction""": DebertaVaModel,
            """fill-mask""": DebertaVaForMaskedLM,
            """question-answering""": DebertaVaForQuestionAnswering,
            """text-classification""": DebertaVaForSequenceClassification,
            """token-classification""": DebertaVaForTokenClassification,
            """zero-shot""": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Common-test feature flags (presumably fx_compatible, test_torchscript,
    # test_pruning, test_head_masking, ... — names lost to obfuscation).
    SCREAMING_SNAKE_CASE__ = True
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = False
    # Originally setUp: builds the model tester and config tester.
    def UpperCAmelCase_ (self ):
        UpperCamelCase__ = DebertaVaModelTester(self )
        UpperCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
    # Config sanity checks.
    def UpperCAmelCase_ (self ):
        self.config_tester.run_common_tests()
    # One wrapper per head, delegating to the tester's create_and_check_*.
    def UpperCAmelCase_ (self ):
        UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*SCREAMING_SNAKE_CASE_ )
    def UpperCAmelCase_ (self ):
        UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
    def UpperCAmelCase_ (self ):
        UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
    def UpperCAmelCase_ (self ):
        UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*SCREAMING_SNAKE_CASE_ )
    def UpperCAmelCase_ (self ):
        UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*SCREAMING_SNAKE_CASE_ )
    def UpperCAmelCase_ (self ):
        UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
    # Smoke-loads the first published checkpoint.
    @slow
    def UpperCAmelCase_ (self ):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase__ = DebertaVaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __A( unittest.TestCase ):
    """Slow integration test: runs the real deberta-v2-xlarge checkpoint on a
    fixed input and compares a 3x3 slice of the hidden states to reference
    values.
    """
    @unittest.skip(reason="""Model not available yet""" )
    def UpperCAmelCase_ (self ):
        pass
    @slow
    def UpperCAmelCase_ (self ):
        UpperCamelCase__ = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
        UpperCamelCase__ = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
        UpperCamelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0]
        # compare the actual values for a slice.
        # NOTE(review): `output` / `SCREAMING_SNAKE_CASE_` below are undefined —
        # the obfuscation collapsed the locals (model, input_ids, attention_mask,
        # output, expected slice) into one rebound name; restore from upstream.
        UpperCamelCase__ = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) , F"{output[:, 1:4, 1:4]}" )
| 86 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __A:
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ):
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = scope
def UpperCAmelCase_ (self ):
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    # Presumably the original `get_config` (it is invoked as `self.get_config()`
    # above); the obfuscation collapsed every method name to `UpperCAmelCase_`.
    def UpperCAmelCase_ (self ):
        # Builds a small BioGptConfig from the tester's hyper-parameters.
        # NOTE(review): `is_decoder=SCREAMING_SNAKE_CASE_` references an undefined
        # name — upstream this is a literal boolean; confirm the intended value.
        return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
    # Base-model check: forward with and without attention mask; the final
    # hidden state must be (batch, seq, hidden).
    # NOTE(review): duplicate `SCREAMING_SNAKE_CASE_` parameters are a
    # SyntaxError, and `model` / `result` are undefined because the locals were
    # collapsed to `UpperCamelCase__` — restore from upstream before running.
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
        UpperCamelCase__ = BioGptModel(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    # Causal-LM head check: logits must be (batch, seq, vocab).
    # NOTE(review): same obfuscation breakage as the other methods —
    # duplicate parameter names and a collapsed `result` local.
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
        UpperCamelCase__ = BioGptForCausalLM(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    # KV-cache consistency check: a forward over the full sequence and a
    # cached forward over just the appended token must agree on a random slice.
    # NOTE(review): duplicate parameter names and collapsed locals
    # (`input_ids`, `attn_mask`, `next_tokens`, ...) make this unrunnable as
    # written — restore from upstream.
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ):
        UpperCamelCase__ = BioGptModel(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        # create attention mask
        UpperCamelCase__ = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = self.seq_length // 2
        UpperCamelCase__ = 0
        # first forward pass
        UpperCamelCase__ , UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        UpperCamelCase__ = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # change a random masked slice from input_ids
        UpperCamelCase__ = ids_tensor((1,) , SCREAMING_SNAKE_CASE_ ).item() + 1
        UpperCamelCase__ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
        UpperCamelCase__ = random_other_next_tokens
        # append to next input_ids and attn_mask
        UpperCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
        UpperCamelCase__ = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )] , dim=1 , )
        # get two different outputs
        UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""]
        UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""]
        # select random slice
        UpperCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        UpperCamelCase__ = output_from_no_past[:, -1, random_slice_idx].detach()
        UpperCamelCase__ = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
    # KV-cache check with a multi-token continuation (3 appended tokens):
    # cached and uncached forwards must agree on the appended positions.
    # NOTE(review): duplicate parameter names and collapsed locals — see the
    # notes on the sibling methods; restore from upstream.
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ):
        UpperCamelCase__ = BioGptModel(config=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ).eval()
        UpperCamelCase__ = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
        # first forward pass
        UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ , UpperCamelCase__ = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        UpperCamelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
        UpperCamelCase__ = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        UpperCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
        UpperCamelCase__ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""]
        UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ )[
            """last_hidden_state"""
        ]
        # select random slice
        UpperCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        UpperCamelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
        UpperCamelCase__ = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ):
UpperCamelCase__ = BioGptForCausalLM(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = BioGptModel(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = BioGptForTokenClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.prepare_config_and_inputs()
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) ,
) = config_and_inputs
UpperCamelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __A(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common, generation and pipeline tests for the BioGPT model family.

    NOTE(review): the original base-class names were obfuscated (the literal
    text had the same name three times, which is a TypeError); the standard
    transformers test mixins are assumed -- confirm against this file's imports.
    NOTE(review): a second test class later in this file reuses the name `__A`
    and shadows this one at module level.
    """

    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        # Small hidden size keeps the generic config tests fast.
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_biogpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            # config is the first element of config_and_inputs
            config_and_inputs[0].position_embedding_type = embedding_type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)

    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class __A(unittest.TestCase):
    """Slow integration tests that exercise the released microsoft/biogpt checkpoint.

    NOTE(review): this class reuses the name `__A` from the model-test class
    above and shadows it at module level; kept as-is to avoid interface changes.
    """

    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384
        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        # Spot-check a 3x3 corner of the logits against reference values.
        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
| 86 | 1 |
'''simple docstring'''
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
if principal <= 0:
raise Exception("""Principal borrowed must be > 0""" )
if rate_per_annum < 0:
raise Exception("""Rate of interest must be >= 0""" )
if years_to_repay <= 0 or not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise Exception("""Years to repay must be an integer > 0""" )
# Yearly rate is divided by 12 to get monthly rate
UpperCAmelCase__ : Tuple = rate_per_annum / 1_2
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
UpperCAmelCase__ : Any = years_to_repay * 1_2
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
# Run any doctests in this module when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
'''simple docstring'''
def _UpperCamelCase ( UpperCamelCase__ = 2_0_0_0_0_0_0 ):
UpperCAmelCase__ : Any = [0 for i in range(n + 1 )]
UpperCAmelCase__ : str = 1
UpperCAmelCase__ : List[str] = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , UpperCamelCase__ ):
UpperCAmelCase__ : Optional[Any] = 1
UpperCAmelCase__ : Any = 0
for i in range(UpperCamelCase__ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
    # NOTE(review): the sieve function in this file is named `_UpperCamelCase`;
    # the original called an undefined name `solution`.
    print(f"""{_UpperCamelCase() = }""")
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__a : Optional[Any] = logging.get_logger(__name__)
__a : List[Any] = {
"facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class UpperCAmelCase_(PretrainedConfig):
    """Configuration for the DETR object-detection model.

    Stores backbone selection, transformer encoder/decoder hyper-parameters,
    Hungarian-matcher costs and loss coefficients.

    NOTE(review): a second class later in this file reuses this class name and
    shadows it at module level; names kept for compatibility.
    """

    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    # PretrainedConfig resolves these common attribute names via the map.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a config from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class UpperCAmelCase_(OnnxConfig):
    """ONNX export configuration for DETR.

    NOTE(review): in the original file this class reuses the name of the DETR
    config class above and therefore shadows it at module level.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for export: batch is dynamic for both inputs.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when validating exported outputs against PyTorch.
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 715 |
'''simple docstring'''
def __snake_case( _lowerCAmelCase ) -> list:
snake_case__ : List[str] = int(_lowerCAmelCase )
if n_element < 1:
snake_case__ : List[Any] = ValueError("""a should be a positive number""" )
raise my_error
snake_case__ : str = [1]
snake_case__ , snake_case__ , snake_case__ : Optional[int] = (0, 0, 0)
snake_case__ : Union[str, Any] = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
    # NOTE(review): the generator function in this file is named `__snake_case`;
    # the original called an undefined name `hamming`.
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = __snake_case(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
| 301 | 0 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the slow and fast Longformer (BPE) tokenizers."""

    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        # NOTE(review): the `add_special_tokens` booleans were obfuscated in the
        # original; True is assumed since the expected ids include <s>/</s>.
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Verify the offsets honour `add_prefix_space` and `trim_offsets` in every combination.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
| 457 |
def __lowerCamelCase ( lowerCamelCase__ : int ):
'''simple docstring'''
lowerCamelCase = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def __lowerCamelCase ( lowerCamelCase__ : int = 5000 ):
'''simple docstring'''
lowerCamelCase = [(i * (3 * i - 1)) // 2 for i in range(1 , lowerCamelCase__ )]
for i, pentagonal_i in enumerate(lowerCamelCase__ ):
for j in range(lowerCamelCase__ , len(lowerCamelCase__ ) ):
lowerCamelCase = pentagonal_nums[j]
lowerCamelCase = pentagonal_i + pentagonal_j
lowerCamelCase = pentagonal_j - pentagonal_i
if is_pentagonal(lowerCamelCase__ ) and is_pentagonal(lowerCamelCase__ ):
return b
return -1
if __name__ == "__main__":
    # NOTE(review): the search function in this file is named `__lowerCamelCase`;
    # the original called an undefined name `solution`.
    print(f"{__lowerCamelCase() = }")
| 457 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class A__(PretrainedConfig):
    """Configuration for the BioGPT model.

    Defaults reproduce the architecture of microsoft/biogpt
    (42384-token vocab, 24 layers, 16 heads, hidden size 1024).
    """

    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        # Special-token ids are handled by the PretrainedConfig base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 717 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__(ProcessorMixin):
    """
    Processor that couples a LayoutLMv2 image processor (pixel values + optional OCR
    words/boxes) with a LayoutXLM tokenizer into a single callable producing model-ready
    inputs (`input_ids`, `bbox`, `attention_mask`, `image`).
    """

    # Names required by the ProcessorMixin contract.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """Accept the deprecated `feature_extractor` kwarg as an alias for `image_processor`."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        Run the image processor on `images`, then the tokenizer on the text (or on the
        OCR'd words when `apply_ocr` is enabled), and merge the pixel values into the
        tokenizer output under the `"image"` key.
        """
        # When the image processor performs OCR itself, caller-supplied boxes/labels
        # would conflict with the OCR-derived ones.
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes '
                'if you initialized the image processor with apply_ocr set to True.'
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.'
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.')

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['words'],
            text_pair=text_pair,
            boxes=boxes if boxes is not None else features['boxes'],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop('pixel_values')
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs['overflow_to_sample_mapping'])
        encoded_inputs['image'] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """Duplicate each sample's image so overflowing token windows keep their image."""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                f""" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"""
            )
        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Names of the inputs the downstream model expects."""
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
| 688 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

# Maps checkpoint names to their hosted config files.
# (The obfuscated original assigned both values to the same name, so the logger
# was immediately overwritten by the dict.)
GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    """
    Configuration for GPT-Neo models: stores model hyper-parameters and expands the
    compact `attention_types` spec into a per-layer `attention_layers` list.
    """

    model_type = 'gpt_neo'
    keys_to_ignore_at_inference = ['past_key_values']
    # Lets generic code use the common names for heads/layers.
    attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}

    def __init__(
        self,
        vocab_size=50_257,
        max_position_embeddings=20_48,
        hidden_size=20_48,
        num_layers=24,
        # NOTE: mutable default kept for API compatibility; treated as read-only.
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=2_56,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        # Every layer must get exactly one attention type.
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.attention_layers)` == `config.num_layers` '
                f'but is `len(config.attention_layers) = {len(self.attention_layers)}`, '
                f'`config.num_layers = {self.num_layers}`. '
                '`config.attention_layers` is prepared using `config.attention_types`. '
                'Please verify the value of `config.attention_types` argument.'
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        """Expand [[pattern, repeat_count], ...] into a flat per-layer list."""
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """
    Pure-indexing reimplementation of `torch.Tensor.unfold` (returns windows of
    length `size` taken every `step` elements along `dimension`), written so it
    can be traced/exported where the builtin op is unsupported.
    """
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    # Start index of every window, truncated to the windows that fully fit.
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode='floor') + 1
    # (num_windows, size) gather indices along `dimension`.
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    # Advanced indexing inserts the window axis at `dimension`; move it to the
    # end so the result matches `Tensor.unfold`'s layout.
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """
    Return (block_length, num_blocks) where block_length is the largest divisor of
    `seq_length` that is strictly smaller than `window_size`.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode='floor')
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    """
    ONNX export configuration for GPT-Neo, with optional `past_key_values` support.

    NOTE(review): the obfuscated original defined four methods under one shared
    name, so only the last survived; the property/method names here are restored
    to the OnnxConfigWithPast contract (`inputs`, `num_attention_heads`,
    `generate_dummy_inputs`, `default_onnx_opset`).
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported graph inputs."""
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy inputs, appending zero past_key_values and an extended mask when `use_past`."""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            # Extend the mask to also cover the (dummy) past positions.
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 13 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

# Maps each `model_type` to the name of its feature-extractor class.
# (Restored names: the obfuscated original assigned all three values below to the
# same identifier, while later code references `logger`,
# `FEATURE_EXTRACTOR_MAPPING_NAMES` and `FEATURE_EXTRACTOR_MAPPING`.)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
        ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"),
        ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("clipseg", "ViTFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"),
        ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"),
        ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
        ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"),
        ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"),
        ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
        ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"),
        ("mctct", "MCTCTFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
        ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"),
        ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"),
        ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"),
        ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"),
        ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"),
        ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"),
        ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"),
        ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"),
        ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"),
        ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"),
        ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"),
        ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"),
        ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"),
        ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"),
        ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"),
        ("yolos", "YolosFeatureExtractor"),
    ]
)

# Lazily instantiates feature extractors from config classes.
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    """
    Resolve a feature-extractor class object from its class name.

    Looks first in the known model modules, then in extractors registered at
    runtime, and finally in the main `transformers` namespace (where a dummy
    object lives when a dependency is missing). Returns None if nothing matches.
    """
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    # Extractors registered via `AutoFeatureExtractor.register` at runtime.
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """
    Load the feature-extractor configuration dict for a model, or return {} when no
    dedicated config file exists (callers then fall back to the model config).
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    """
    Factory that instantiates the correct feature-extractor class for a checkpoint.
    Not meant to be instantiated directly — use `from_pretrained`.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """
        Resolve the feature-extractor class for `pretrained_model_name_or_path` —
        from its feature-extractor config, its `auto_map` (remote code), or the
        model config — and build it from the loaded config dict.
        """
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
            f"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
            f"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}"""
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new (config class -> feature-extractor class) pair."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
| 367 | 0 |
"""simple docstring"""
from pathlib import Path
import fire
def minify(src_dir, dest_dir, n):
    """
    Write the first `n` lines (right-stripped) of every file in `src_dir` into a
    same-named file under `dest_dir` (created if missing).
    """
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        with path.open() as src:
            kept = [line.rstrip() for line in src][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        with dest_path.open("w") as out:
            out.write("\n".join(kept))


if __name__ == "__main__":
    fire.Fire(minify)
| 74 |
"""simple docstring"""
import argparse
import struct
import unittest
class SHAaaa:
    """
    Pure-Python SHA-256 (FIPS 180-4). Hashing runs in __init__; the hex digest is
    exposed as `self.hash`.

    Fixes vs the previous version: the two per-round temporaries (temp1, temp2)
    had been collapsed into one variable, so the new working value `a` was
    computed as 2*temp2 instead of temp1 + temp2, producing wrong digests.
    """

    def __init__(self, data):
        self.data = data

        # Initial hash values (first 32 bits of the fractional parts of the
        # square roots of the first 8 primes).
        self.hashes = [
            0x6A09E667,
            0xBB67AE85,
            0x3C6EF372,
            0xA54FF53A,
            0x510E527F,
            0x9B05688C,
            0x1F83D9AB,
            0x5BE0CD19,
        ]

        # Round constants (first 32 bits of the fractional parts of the cube
        # roots of the first 64 primes).
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
            0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
            0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
            0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
            0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
            0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]

        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data):
        """Pad to a multiple of 64 bytes: 0x80, zeros, then the 64-bit bit length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self):
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # 16 message words, then 48 words to be filled by the schedule.
            words = list(struct.unpack(">16L", block)) + [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # Extend the message schedule.
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_00_00_00_00

                # Compression
                big_s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
                temp1 = (
                    h + big_s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                big_s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (big_s0 + maj) % 0x1_00_00_00_00

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_00_00_00_00),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value, rotations):
        """Right-rotate a 32-bit value."""
        return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
class lowerCamelCase__(unittest.TestCase):
    """Checks the pure-Python SHA-256 against hashlib's reference implementation."""

    def test_match_hashes(self):
        # `test_` prefix restored so unittest discovery actually runs this;
        # `hashlib.shaaaa` fixed to the real `hashlib.sha256`.
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHAaaa(msg).hash, hashlib.sha256(msg).hexdigest())
def main():
    """CLI entry point: hash a string (default) or a file's contents with SHAaaa."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()

    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input, "utf-8")

    print(SHAaaa(hash_input).hash)


if __name__ == "__main__":
    main()
| 74 | 1 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
# Name restored: the guarded dataclass definitions below test `is_python_no_less_than_3_10`.
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    """
    Dataclass helper: a field whose (mutable) default list is wrapped in a factory
    so each instance gets its own copy.
    """
    return field(default_factory=lambda: default, metadata=metadata)
# NOTE(review): these are argument-parser test fixtures. The obfuscation collapsed
# every field name to `_lowerCamelCase`, so within each dataclass the later
# assignments overwrite the earlier ones and the original field names/annotations
# (and enum member names) are lost — restore from the upstream test file before use.
@dataclass
class lowerCAmelCase__ :
    """Basic example: originally four distinct typed fields (names lost in obfuscation)."""
    _lowerCamelCase =42
    _lowerCamelCase =42
    _lowerCamelCase =42
    _lowerCamelCase =42
@dataclass
class lowerCAmelCase__ :
    """Example with a defaulted field carrying help metadata."""
    _lowerCamelCase =42
    _lowerCamelCase =field(default="toto" , metadata={"help": "help message"} )
@dataclass
class lowerCAmelCase__ :
    """Boolean example: False / True / None defaults (optional bool)."""
    _lowerCamelCase =False
    _lowerCamelCase =True
    _lowerCamelCase =None
class lowerCAmelCase__ ( __UpperCamelCase ):
    """Enum fixture with string members ("titi", "toto"); base name was garbled."""
    _lowerCamelCase ="titi"
    _lowerCamelCase ="toto"
class lowerCAmelCase__ ( __UpperCamelCase ):
    """Mixed-type enum fixture: two string members and an int member (42)."""
    _lowerCamelCase ="titi"
    _lowerCamelCase ="toto"
    _lowerCamelCase =42
@dataclass
class lowerCAmelCase__ :
    """Enum-typed example; __post_init__ converts the raw value to the enum."""
    _lowerCamelCase ="toto"
    def __snake_case ( self : Union[str, Any] ):
        # presumably __post_init__; references `self.foo`, a field name lost above — TODO confirm
        UpperCAmelCase = BasicEnum(self.foo )
@dataclass
class lowerCAmelCase__ :
    """Mixed-type-enum example; __post_init__ converts the raw value to the enum."""
    _lowerCamelCase ="toto"
    def __snake_case ( self : List[Any] ):
        # presumably __post_init__; references `self.foo` — TODO confirm
        UpperCAmelCase = MixedTypeEnum(self.foo )
@dataclass
class lowerCAmelCase__ :
    """Optional example: optional scalars plus list fields built via list_field."""
    _lowerCamelCase =None
    _lowerCamelCase =field(default=__UpperCamelCase , metadata={"help": "help message"} )
    _lowerCamelCase =None
    _lowerCamelCase =list_field(default=[] )
    _lowerCamelCase =list_field(default=[] )
@dataclass
class lowerCAmelCase__ :
    """List example: int/str/float lists with non-trivial defaults."""
    _lowerCamelCase =list_field(default=[] )
    _lowerCamelCase =list_field(default=[1, 2, 3] )
    _lowerCamelCase =list_field(default=["Hallo", "Bonjour", "Hello"] )
    _lowerCamelCase =list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class lowerCAmelCase__ :
    """Required-fields example: three fields with no defaults."""
    _lowerCamelCase =field()
    _lowerCamelCase =field()
    _lowerCamelCase =field()
    def __snake_case ( self : Union[str, Any] ):
        # presumably __post_init__; references `self.required_enum` — TODO confirm
        UpperCAmelCase = BasicEnum(self.required_enum )
@dataclass
class lowerCAmelCase__ :
    """Mixed required/optional example."""
    _lowerCamelCase =42
    _lowerCamelCase =field()
    _lowerCamelCase =None
    _lowerCamelCase =field(default="toto" , metadata={"help": "help message"} )
    _lowerCamelCase =list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
    # PEP 604 (`X | None`) variants of the fixtures above, only valid on 3.10+.
    @dataclass
    class lowerCAmelCase__ :
        """3.10+ boolean example."""
        _lowerCamelCase =False
        _lowerCamelCase =True
        _lowerCamelCase =None
    @dataclass
    class lowerCAmelCase__ :
        """3.10+ optional example."""
        _lowerCamelCase =None
        _lowerCamelCase =field(default=__UpperCamelCase , metadata={"help": "help message"} )
        _lowerCamelCase =None
        _lowerCamelCase =list_field(default=[] )
        _lowerCamelCase =list_field(default=[] )
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : List[str] , a__ : Optional[Any] , a__ : int ):
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
UpperCAmelCase = {k: v for k, v in vars(a__ ).items() if k != "container"}
UpperCAmelCase = {k: v for k, v in vars(a__ ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , a__ ) and yy.get('''choices''' , a__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](a__ ) , yy['''type'''](a__ ) )
del xx["type"], yy["type"]
self.assertEqual(a__ , a__ )
def __snake_case ( self : Union[str, Any] ):
UpperCAmelCase = HfArgumentParser(a__ )
UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=a__ , required=a__ )
expected.add_argument('''--bar''' , type=a__ , required=a__ )
expected.add_argument('''--baz''' , type=a__ , required=a__ )
expected.add_argument('''--flag''' , type=a__ , default=a__ , const=a__ , nargs='''?''' )
self.argparsersEqual(a__ , a__ )
UpperCAmelCase = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
(UpperCAmelCase ) = parser.parse_args_into_dataclasses(a__ , look_for_args_file=a__ )
self.assertFalse(example.flag )
def __snake_case ( self : List[str] ):
UpperCAmelCase = HfArgumentParser(a__ )
UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=a__ )
expected.add_argument('''--baz''' , default='''toto''' , type=a__ , help='''help message''' )
self.argparsersEqual(a__ , a__ )
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=a__ , default=a__ , const=a__ , nargs='''?''' )
expected.add_argument('''--baz''' , type=a__ , default=a__ , const=a__ , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=a__ , dest='''baz''' )
expected.add_argument('''--opt''' , type=a__ , default=a__ )
UpperCAmelCase = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(a__ )
for dataclass_type in dataclass_types:
UpperCAmelCase = HfArgumentParser(a__ )
self.argparsersEqual(a__ , a__ )
UpperCAmelCase = parser.parse_args([] )
self.assertEqual(a__ , Namespace(foo=a__ , baz=a__ , opt=a__ ) )
UpperCAmelCase = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(a__ , Namespace(foo=a__ , baz=a__ , opt=a__ ) )
UpperCAmelCase = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(a__ , Namespace(foo=a__ , baz=a__ , opt=a__ ) )
UpperCAmelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(a__ , Namespace(foo=a__ , baz=a__ , opt=a__ ) )
UpperCAmelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(a__ , Namespace(foo=a__ , baz=a__ , opt=a__ ) )
def __snake_case ( self : Tuple ):
UpperCAmelCase = HfArgumentParser(a__ )
UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(a__ , a__ )
UpperCAmelCase = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __snake_case ( self : str ):
    """Same choices behaviour as the enum test, driven by a literal-style annotation."""

    @dataclass
    class lowerCAmelCase__:
        '''simple docstring'''

        # NOTE(review): the field's original name/annotation was lost in this copy;
        # the expected parser below implies a field `foo` restricted to ("titi", "toto", 42).
        _lowerCamelCase = "toto"

    parser = HfArgumentParser(a__)
    expected = argparse.ArgumentParser()
    expected.add_argument(
        "--foo",
        default="toto",
        choices=("titi", "toto", 42),
        type=make_choice_type_function(["titi", "toto", 42]),
    )
    self.argparsersEqual(parser, expected)

    args = parser.parse_args([])
    self.assertEqual(args.foo, "toto")
    args = parser.parse_args(["--foo", "titi"])
    self.assertEqual(args.foo, "titi")
    args = parser.parse_args(["--foo", "42"])
    self.assertEqual(args.foo, 42)
def __snake_case ( self : str ):
    """Check nargs='+' list fields: element types, defaults, and CLI overrides."""
    # NOTE(review): `a__` is the list-example dataclass under test; name lost in this copy.
    parser = HfArgumentParser(a__)
    expected = argparse.ArgumentParser()
    # Element types restored from the defaults and the parsed values asserted below.
    expected.add_argument("--foo_int", nargs="+", default=[], type=int)
    expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
    expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
    expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
    self.argparsersEqual(parser, expected)

    args = parser.parse_args([])
    self.assertEqual(
        args,
        Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
    )
    args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
    self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))
def __snake_case ( self : int ):
    """Check Optional[...] fields: None defaults and CLI parsing, on both annotation styles."""
    expected = argparse.ArgumentParser()
    # Field types restored from the parsed values asserted below (foo int, bar float, baz str, ...).
    expected.add_argument("--foo", default=None, type=int)
    expected.add_argument("--bar", default=None, type=float, help="help message")
    expected.add_argument("--baz", default=None, type=str)
    expected.add_argument("--ces", nargs="+", default=[], type=str)
    expected.add_argument("--des", nargs="+", default=[], type=int)

    dataclass_types = [OptionalExample]
    if is_python_no_less_than_3_10:
        # NOTE(review): the PEP-604 (`X | None`) variant of OptionalExample; name lost in this copy.
        dataclass_types.append(a__)

    for dataclass_type in dataclass_types:
        parser = HfArgumentParser(dataclass_type)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))
        args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
        self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
def __snake_case ( self : Optional[Any] ):
    """Check that fields without defaults are emitted as required arguments."""
    # NOTE(review): `a__` is the required-example dataclass; element/field types below
    # restored from the upstream test — confirm against the dataclass definition.
    parser = HfArgumentParser(a__)
    expected = argparse.ArgumentParser()
    expected.add_argument("--required_list", nargs="+", type=int, required=True)
    expected.add_argument("--required_str", type=str, required=True)
    expected.add_argument(
        "--required_enum",
        type=make_choice_type_function(["titi", "toto"]),
        choices=["titi", "toto"],
        required=True,
    )
    self.argparsersEqual(parser, expected)
def __snake_case ( self : Optional[Any] ):
    """Check string-literal (forward-reference) annotations resolve to real argument types."""
    # NOTE(review): `a__` is the dataclass under test; the concrete types below were
    # restored from the upstream test — confirm against the dataclass definition.
    parser = HfArgumentParser(a__)
    expected = argparse.ArgumentParser()
    expected.add_argument("--foo", type=int, required=True)
    expected.add_argument(
        "--required_enum",
        type=make_choice_type_function(["titi", "toto"]),
        choices=["titi", "toto"],
        required=True,
    )
    expected.add_argument("--opt", type=bool, default=None)
    expected.add_argument("--baz", default="toto", type=str, help="help message")
    expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
    self.argparsersEqual(parser, expected)
def __snake_case ( self : str ):
    """parse_dict should populate the dataclass directly from a mapping."""
    parser = HfArgumentParser(BasicExample)
    args_dict = {
        "foo": 12,
        "bar": 3.14,
        "baz": "42",
        "flag": True,
    }
    parsed = parser.parse_dict(args_dict)[0]
    expected = BasicExample(**args_dict)
    self.assertEqual(parsed, expected)
def __snake_case ( self : Any ):
    """parse_dict must reject unknown keys when allow_extra_keys is disabled."""
    parser = HfArgumentParser(BasicExample)
    args_dict = {
        "foo": 12,
        "bar": 3.14,
        "baz": "42",
        "flag": True,
        "extra": 42,
    }
    # The unknown "extra" key must raise when extras are not allowed.
    self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
def __snake_case ( self : List[str] ):
    """Round-trip a config through a JSON file via parse_json_file."""
    parser = HfArgumentParser(BasicExample)
    args_dict = {
        "foo": 12,
        "bar": 3.14,
        "baz": "42",
        "flag": True,
    }
    with tempfile.TemporaryDirectory() as tmp_dir:
        temp_local_path = os.path.join(tmp_dir, "temp_json")
        os.mkdir(temp_local_path)
        with open(temp_local_path + ".json", "w+") as f:
            json.dump(args_dict, f)
        # BUG FIX: a .json payload must go through parse_json_file, not parse_yaml_file.
        parsed = parser.parse_json_file(Path(temp_local_path + ".json"))[0]
    expected = BasicExample(**args_dict)
    self.assertEqual(parsed, expected)
def __snake_case ( self : List[str] ):
    """Round-trip a config through a YAML file via parse_yaml_file."""
    parser = HfArgumentParser(BasicExample)
    args_dict = {
        "foo": 12,
        "bar": 3.14,
        "baz": "42",
        "flag": True,
    }
    with tempfile.TemporaryDirectory() as tmp_dir:
        temp_local_path = os.path.join(tmp_dir, "temp_yaml")
        os.mkdir(temp_local_path)
        with open(temp_local_path + ".yaml", "w+") as f:
            yaml.dump(args_dict, f)
        parsed = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]
    expected = BasicExample(**args_dict)
    self.assertEqual(parsed, expected)
def __snake_case ( self : Optional[int] ):
    """HfArgumentParser construction itself should succeed for the test dataclass."""
    # NOTE(review): `a__` is the dataclass under test; its name was lost in this copy.
    parser = HfArgumentParser(a__)
    self.assertIsNotNone(parser)
| 51 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase : Any = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    """Build an ASTConfig matching the named original checkpoint.

    Renamed from the shadowing placeholder `A` to the name its caller uses.
    """
    config = ASTConfig()
    if "10-10" in model_name:
        pass  # default patch strides already match the 10-10 models
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        # NOTE(review): attribute targets were lost in this copy; restored as the
        # ASTConfig stride fields — confirm against ASTConfig's parameter names.
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    # Download the id->label mapping and register it (plus the inverse) on the config.
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def rename_key(name):
    """Map an original AST checkpoint parameter name to its 🤗 Transformers equivalent.

    Replacements are applied in sequence, so earlier rewrites feed later checks.
    Renamed from the shadowing placeholder `A` to the name its caller uses.
    """
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        # only remaining bare "attn" occurrences reach here ("attention..." no longer matches)
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    """Rewrite checkpoint keys in place: split fused qkv tensors into query/key/value,
    rename everything else via rename_key.

    Fixes the duplicate-parameter signature (a SyntaxError) and restores the
    destination dict keys that were lost in this copy (reconstructed from the
    upstream conversion script — review before trusting converted weights).
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])  # e.g. "module.v.blocks.<layer>.attn.qkv.weight"
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                # fused qkv weight is stacked as [q; k; v] along dim 0
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def remove_keys(state_dict):
    """Drop the original checkpoint's classification-head keys in place.

    Fixes the undefined `ignore_keys`/`state_dict` references and the
    `pop(_A, _A)` call left by the mangled copy; renamed from the shadowing
    placeholder `A` to the name its caller uses.
    """
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)  # tolerate checkpoints that lack a given head key
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original AST checkpoint to the 🤗 Transformers format and verify its logits.

    Fixes the duplicate-parameter signature (a SyntaxError) and restores the
    local bindings lost in this copy; renamed to the name the __main__ block uses.
    """
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset", )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
__UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__UpperCAmelCase : Optional[Any] = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 584 | 0 |
'''simple docstring'''
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class __A(unittest.TestCase):
    """Tests for GradientAccumulator, single-device and under MirroredStrategy.

    Fixes from the mangled copy: the helper had duplicate parameter names (a
    SyntaxError), both test methods shared one name (the second shadowed the
    first), and most local bindings (`accumulator`, `strategy`, ...) were read
    but never assigned. Reconstructed detail is flagged below.
    """

    def assertListAlmostEqual(self, list1, list2, tol):
        # Element-wise approximate comparison used by both tests below.
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_gradient_accumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        # A step with a different number of gradients must be rejected.
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def test_gradient_accumulator_distribution_strategy(self):
        # NOTE(review): the first statement's assignment target was lost in this
        # copy; restored as the eager-context reset that precedes
        # enable_eager_execution_internal() — confirm against the upstream test.
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            # Split the single CPU into two logical devices so MirroredStrategy has two replicas.
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            # NOTE(review): create_optimizer returns (optimizer, schedule); the tuple
            # unpacking was collapsed in this copy — confirm.
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])
        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
| 717 | '''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main() -> None:
    """Run the TensorFlow benchmark CLI, translating deprecated --no_* flags into
    a helpful error message.

    Renamed from `_lowercase` to the name the __main__ guard calls; the `-> Dict`
    annotation was wrong (nothing is returned) and the `parser`/`benchmark`/... 
    bindings were read but never assigned in this copy.
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # SECURITY NOTE(review): eval() on the exception text — tolerated here only
        # because the string comes from our own argparse error, not external input.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
            raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
| 10 | 0 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
lowerCAmelCase_ : Optional[Any] = logging.getLogger(__name__)
class __lowerCAmelCase ( BaseTransformer ):
    """Lightning module for summarization fine-tuning, validated on ROUGE.

    Base class restored to BaseTransformer (imported from lightning_base above);
    the placeholder `__a` was undefined. Attribute names restored from the sites
    that read them (self.mode, self.loss_names, self.metric_names,
    self.default_val_metric).
    """

    mode = """summarization"""
    loss_names = ["""loss"""]
    metric_names = ROUGE_KEYS
    default_val_metric = """rouge2"""
def __init__(self, hparams, **kwargs):
    """Build the summarization module: wrapped model, dataset plumbing, bookkeeping.

    Every `self.*` assignment below was collapsed to a throwaway local in this
    copy, leaving the attributes read by the step/dataloader methods unset.
    """
    if hparams.sortish_sampler and hparams.gpus > 1:
        # NOTE(review): assignment target lost in this copy; restored from upstream — confirm.
        hparams.replace_sampler_ddp = False
    elif hparams.max_tokens_per_batch is not None:
        if hparams.gpus > 1:
            raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""")
        if hparams.sortish_sampler:
            raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""")

    super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
    use_task_specific_params(self.model, """summarization""")
    save_git_info(self.hparams.output_dir)
    self.metrics_save_path = Path(self.output_dir) / """metrics.json"""
    self.hparams_save_path = Path(self.output_dir) / """hparams.pkl"""
    pickle_save(self.hparams, self.hparams_save_path)
    self.step_count = 0
    self.metrics = defaultdict(list)
    self.model_type = self.config.model_type
    # fsmt keeps separate source/target vocabs; use the target one for the LM head.
    self.vocab_size = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size

    self.dataset_kwargs: dict = {
        "data_dir": self.hparams.data_dir,
        "max_source_length": self.hparams.max_source_length,
        "prefix": self.model.config.prefix or "",
    }
    n_observations_per_split = {
        """train""": self.hparams.n_train,
        """val""": self.hparams.n_val,
        """test""": self.hparams.n_test,
    }
    # Negative counts mean "use the whole split".
    self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
    self.target_lens = {
        """train""": self.hparams.max_target_length,
        """val""": self.hparams.val_max_target_length,
        """test""": self.hparams.test_max_target_length,
    }
    assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
    assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
    if self.hparams.freeze_embeds:
        freeze_embeds(self.model)
    if self.hparams.freeze_encoder:
        freeze_params(self.model.get_encoder())
        assert_all_frozen(self.model.get_encoder())

    self.hparams.git_sha = get_git_info()["""repo_sha"""]
    self.num_workers = hparams.num_workers
    self.decoder_start_token_id = None  # default to config
    if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
        # MBart needs the target-language code as the decoder start token.
        self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
        self.model.config.decoder_start_token_id = self.decoder_start_token_id
    self.dataset_class = (
        SeqaSeqDataset if hasattr(self.tokenizer, """prepare_seq2seq_batch""") else LegacySeqaSeqDataset
    )
    self.already_saved_batch = False
    self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
    if self.hparams.eval_max_gen_length is not None:
        self.eval_max_length = self.hparams.eval_max_gen_length
    else:
        self.eval_max_length = self.model.config.max_length
    self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def save_readable_batch(self, batch):
    """Dump a human-readable and a raw token-id view of `batch` for debugging.

    Renamed to the name _step calls; `readable_batch` and
    `self.already_saved_batch` were never bound in this copy.
    """
    readable_batch = {
        k: self.tokenizer.batch_decode(v.tolist()) if """mask""" not in k else v.shape for k, v in batch.items()
    }
    save_json(readable_batch, Path(self.output_dir) / """text_batch.json""")
    save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / """tok_batch.json""")
    self.already_saved_batch = True  # read by _step so only the first batch is dumped
    return readable_batch
def forward(self, input_ids, **kwargs):
    """Delegate to the wrapped HF model; renamed so `self(...)` in _step resolves here."""
    return self.model(input_ids, **kwargs)
def ids_to_clean_text(self, generated_ids):
    """Decode token ids to whitespace-stripped strings (renamed to the name _generative_step calls)."""
    # NOTE(review): the two decode flags were lost in this copy; restored as True — confirm.
    gen_text = self.tokenizer.batch_decode(
        generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
    )
    return lmap(str.strip, gen_text)
def _step(self, batch):
    """Compute the (optionally label-smoothed) LM loss for one batch.

    Renamed to the name training_step/_generative_step call; all intermediate
    bindings were lost in this copy.
    """
    pad_token_id = self.tokenizer.pad_token_id
    src_ids, src_mask = batch["""input_ids"""], batch["""attention_mask"""]
    tgt_ids = batch["""labels"""]
    if isinstance(self.model, TaForConditionalGeneration):
        # T5 has its own shift helper.
        decoder_input_ids = self.model._shift_right(tgt_ids)
    else:
        decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
    if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
        batch["""decoder_input_ids"""] = decoder_input_ids
        self.save_readable_batch(batch)

    outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
    lm_logits = outputs["""logits"""]
    if self.hparams.label_smoothing == 0:
        # Same behavior as modeling_bart.py, besides ignoring pad_token_id
        ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)
        assert lm_logits.shape[-1] == self.vocab_size
        loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
    else:
        lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
        loss, nll_loss = label_smoothed_nll_loss(
            lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
        )
    return (loss,)
@property
def pad(self) -> int:
    """Tokenizer pad token id (renamed so `self.pad` in training_step resolves here)."""
    return self.tokenizer.pad_token_id
def training_step(self, batch, batch_idx):
    """One optimization step: loss plus padding/throughput statistics for logging.

    The `loss_tensors`/`logs` bindings and the log-dict keys were lost in this
    copy; keys restored from upstream (NOTE(review): confirm key names).
    """
    loss_tensors = self._step(batch)
    logs = dict(zip(self.loss_names, loss_tensors))
    # tokens per batch
    logs["tpb"] = batch["""input_ids"""].ne(self.pad).sum() + batch["""labels"""].ne(self.pad).sum()
    logs["bs"] = batch["""input_ids"""].shape[0]
    logs["src_pad_tok"] = batch["""input_ids"""].eq(self.pad).sum()
    logs["src_pad_frac"] = batch["""input_ids"""].eq(self.pad).float().mean()
    # TODO(SS): make a wandb summary metric for this
    return {"loss": loss_tensors[0], "log": logs}
def validation_step(self, batch, batch_idx):
    """Validation delegates to the shared generate-and-score step."""
    return self._generative_step(batch)
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__="val" ):
self.step_count += 1
_UpperCAmelCase : List[str] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
_UpperCAmelCase : Union[str, Any] = losses["""loss"""]
_UpperCAmelCase : Any = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
_UpperCAmelCase : Union[str, Any] = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
_UpperCAmelCase : torch.FloatTensor = torch.tensor(lowerCAmelCase__ ).type_as(lowerCAmelCase__ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowerCAmelCase__ )
_UpperCAmelCase : Any = {F"{prefix}_avg_{k}": x for k, x in losses.items()}
_UpperCAmelCase : List[str] = self.step_count
self.metrics[prefix].append(lowerCAmelCase__ ) # callback writes this to self.metrics_save_path
_UpperCAmelCase : List[Any] = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F"{prefix}_loss": loss,
F"{prefix}_{self.val_metric}": metric_tensor,
}
def calc_generative_metrics(self, preds, target):
    """ROUGE between predictions and references (renamed to the name _generative_step calls)."""
    return calculate_rouge(preds, target)
def _generative_step(self, batch):
    """Generate summaries for one batch and score them (loss, ROUGE, timing).

    Renamed to the name validation_step/test_step call; intermediate bindings
    were lost in this copy.
    """
    ta = time.time()
    # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
    generated_ids = self.model.generate(
        batch["""input_ids"""],
        attention_mask=batch["""attention_mask"""],
        use_cache=True,
        decoder_start_token_id=self.decoder_start_token_id,
        num_beams=self.eval_beams,
        max_length=self.eval_max_length,
    )
    gen_time = (time.time() - ta) / batch["""input_ids"""].shape[0]
    preds = self.ids_to_clean_text(generated_ids)
    target = self.ids_to_clean_text(batch["""labels"""])
    loss_tensors = self._step(batch)
    base_metrics = dict(zip(self.loss_names, loss_tensors))
    rouge = self.calc_generative_metrics(preds, target)
    summ_len = np.mean(lmap(len, preds))
    base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
    return base_metrics
def test_step(self, batch, batch_idx):
    """Test mirrors validation: generate and score."""
    return self._generative_step(batch)
def test_epoch_end(self, outputs):
    """Aggregate test outputs with the validation logic, under the 'test' prefix."""
    return self.validation_epoch_end(outputs, prefix="""test""")
def get_dataset(self, type_path):
    """Build the seq2seq dataset for one split (renamed to the name get_dataloader calls)."""
    n_obs = self.n_obs[type_path]
    max_target_length = self.target_lens[type_path]
    dataset = self.dataset_class(
        self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs,
    )
    return dataset
def get_dataloader(self, type_path, batch_size, shuffle=False):
    """Build a DataLoader for a split; training may use a sortish or dynamic-batch sampler.

    Parameter names restored from the keyword call in train_dataloader
    (`batch_size=`, `shuffle=`); sampler bindings were lost in this copy.
    """
    dataset = self.get_dataset(type_path)

    if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
        sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
        return DataLoader(
            dataset,
            batch_size=batch_size,
            collate_fn=dataset.collate_fn,
            shuffle=False,  # a custom sampler is mutually exclusive with shuffle
            num_workers=self.num_workers,
            sampler=sampler,
        )
    elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
        batch_sampler = dataset.make_dynamic_sampler(
            self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
        )
        return DataLoader(
            dataset,
            batch_sampler=batch_sampler,
            collate_fn=dataset.collate_fn,
            num_workers=self.num_workers,
        )
    else:
        return DataLoader(
            dataset,
            batch_size=batch_size,
            collate_fn=dataset.collate_fn,
            shuffle=shuffle,
            num_workers=self.num_workers,
            sampler=None,
        )
def train_dataloader(self):
    """Training loader; NOTE(review): shuffle flag was lost in this copy, restored as True."""
    dataloader = self.get_dataloader("""train""", batch_size=self.hparams.train_batch_size, shuffle=True)
    return dataloader

def val_dataloader(self):
    """Validation loader (no shuffling)."""
    return self.get_dataloader("""val""", batch_size=self.hparams.eval_batch_size)

def test_dataloader(self):
    """Test loader (no shuffling)."""
    return self.get_dataloader("""test""", batch_size=self.hparams.eval_batch_size)
@staticmethod
def snake_case_ (lowerCAmelCase__ , lowerCAmelCase__ ):
BaseTransformer.add_model_specific_args(lowerCAmelCase__ , lowerCAmelCase__ )
add_generic_args(lowerCAmelCase__ , lowerCAmelCase__ )
parser.add_argument(
"""--max_source_length""" , default=1_0_2_4 , type=lowerCAmelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--max_target_length""" , default=5_6 , type=lowerCAmelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--val_max_target_length""" , default=1_4_2 , type=lowerCAmelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--test_max_target_length""" , default=1_4_2 , type=lowerCAmelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument("""--freeze_encoder""" , action="""store_true""" )
parser.add_argument("""--freeze_embeds""" , action="""store_true""" )
parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=lowerCAmelCase__ )
parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=lowerCAmelCase__ )
parser.add_argument("""--max_tokens_per_batch""" , type=lowerCAmelCase__ , default=lowerCAmelCase__ )
parser.add_argument("""--logger_name""" , type=lowerCAmelCase__ , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" )
parser.add_argument("""--n_train""" , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" , type=lowerCAmelCase__ , default=5_0_0 , required=lowerCAmelCase__ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" , type=lowerCAmelCase__ , default="""summarization""" , required=lowerCAmelCase__ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" , type=lowerCAmelCase__ , default=0.0 , required=lowerCAmelCase__ )
parser.add_argument("""--src_lang""" , type=lowerCAmelCase__ , default="""""" , required=lowerCAmelCase__ )
parser.add_argument("""--tgt_lang""" , type=lowerCAmelCase__ , default="""""" , required=lowerCAmelCase__ )
parser.add_argument("""--eval_beams""" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ )
parser.add_argument(
"""--val_metric""" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ , choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" , type=lowerCAmelCase__ , default=1 , required=lowerCAmelCase__ , help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) , )
return parser
class __lowerCAmelCase ( __a ):
    """Translation fine-tuning module: same training loop as the base class,
    scored with BLEU instead of ROUGE."""

    # NOTE(review): all four class attributes share the single name
    # ``snake_case``, so only the last assignment ("bleu") survives.  Upstream
    # these are presumably ``mode``/``loss_names``/``metric_names``/
    # ``default_val_metric``; renaming them would change the public interface,
    # so they are left untouched here.
    snake_case : Optional[Any] = """translation"""
    snake_case : Any = ["""loss"""]
    snake_case : str = ["""bleu"""]
    snake_case : Optional[int] = """bleu"""

    def __init__(self , hparams , **kwargs ):
        # BUG FIX: the original used one name for both the positional parameter
        # and ``**kwargs`` (a SyntaxError); distinct names are restored.
        super().__init__(hparams , **kwargs )
        # BUG FIX: the language codes were bound to throwaway locals in the
        # original; they are recorded on the dataset kwargs so the tokenizer
        # sees them (as in the upstream finetune script).
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def snake_case_ (self , preds , target ):
        """Compute the generative metric for translation: corpus BLEU."""
        # BUG FIX: the original declared two parameters with the same name
        # (a SyntaxError); distinct names are restored.
        return calculate_bleu(preds , target )
def __A ( args , model=None ):
    """Train (and optionally test) a summarization/translation module.

    Args:
        args: parsed command-line namespace (``output_dir``, ``task``,
            ``logger_name``, ``do_predict``, ...).
        model: optional pre-built module; when ``None`` one is constructed
            according to ``args.task``.

    Returns:
        The trained module.
    """
    # BUG FIX: the original declared both parameters under one name (a
    # SyntaxError); restored to ``args``/``model``.
    Path(args.output_dir ).mkdir(exist_ok=True )
    check_output_dir(args , expected_items=3 )
    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args )
        else:
            model: SummarizationModule = TranslationModule(args )
    dataset = Path(args.data_dir ).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir ).startswith("""/tmp""" )
        or str(args.output_dir ).startswith("""/var""" )
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("""WANDB_PROJECT""" , dataset )
        logger = WandbLogger(name=model.output_dir.name , project=project )
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name , project=f"hf_{dataset}" )
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
    else:
        es_callback = False
    lower_is_better = args.val_metric == """loss"""
    trainer: pl.Trainer = generic_train(
        model , args , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
            args.output_dir , model.val_metric , args.save_top_k , lower_is_better ) , early_stopping_callback=es_callback , logger=logger , )
    pickle_save(model.hparams , model.output_dir / """hparams.pkl""" )
    if not args.do_predict:
        return model
    # BUG FIX: these assignments were bound to throwaway locals in the
    # original; they must land on the model/trainer so testing resumes from
    # the newest checkpoint (as in the upstream finetune script).
    model.hparams.test_checkpoint = """"""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=True ) )
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams )
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
lowerCAmelCase_ : Optional[int] = argparse.ArgumentParser()
lowerCAmelCase_ : Optional[int] = pl.Trainer.add_argparse_args(parser)
lowerCAmelCase_ : int = SummarizationModule.add_model_specific_args(parser, os.getcwd())
lowerCAmelCase_ : Any = parser.parse_args()
main(args)
| 414 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class __lowerCAmelCase ( __a ):
    """Output of the DeepFloyd-IF pipelines: generated images plus optional
    per-image safety-checker flags.

    NOTE(review): all three declarations share the single name ``snake_case``;
    in a dataclass the later annotations merely re-annotate the same field, so
    only ONE field effectively exists.  Upstream these are presumably
    ``images`` / ``nsfw_detected`` / ``watermark_detected`` -- confirm before
    relying on this class.
    """

    # Generated images as PIL images or a numpy array.
    snake_case : Union[List[PIL.Image.Image], np.ndarray]
    # Per-image NSFW flags (shadowed -- see class NOTE).
    snake_case : Optional[List[bool]]
    # Per-image watermark flags (shadows the two lines above -- see class NOTE).
    snake_case : Optional[List[bool]]
# Import the IF pipelines only when both transformers and torch are installed;
# otherwise fall back to the dummy placeholder objects, which raise a helpful
# error message when instantiated.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_imgaimg import IFImgaImgPipeline
    from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
| 414 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __snake_case( A_ ):
    """Unit tests for ``DEISMultistepScheduler``.

    NOTE(review): this block appears machine-rewritten.  Many statements bind
    their result to the throwaway name ``__A`` while later statements read the
    variable names the values were presumably meant to have (``config``,
    ``kwargs``, ``scheduler``, ``sample``, ``residual``, ``output``, ...), so
    those lines raise NameError as written.  Two signatures below also reuse
    ``__lowerCamelCase`` for both a positional parameter and ``**kwargs``,
    which is a SyntaxError.  The code is kept byte-identical here; confirm
    against the upstream diffusers scheduler test suite before running.
    """

    # Scheduler classes under test and the default forward kwargs.
    _UpperCAmelCase = (DEISMultistepScheduler,)
    _UpperCAmelCase = (("num_inference_steps", 2_5),)

    def _a ( self , **__lowerCamelCase ):
        """Build a default scheduler config, applying keyword overrides."""
        __A : List[Any] = {
            'num_train_timesteps': 1000,
            'beta_start': 0.00_01,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
        }
        # NOTE(review): ``config`` is never bound here (the dict above went to
        # ``__A``) -- NameError at runtime.
        config.update(**__lowerCamelCase )
        return config

    # NOTE(review): positional parameter and **kwargs share one name -- SyntaxError.
    def _a ( self , __lowerCamelCase=0 , **__lowerCamelCase ):
        """Round-trip the scheduler through save_config/from_pretrained and
        check both copies step identically."""
        __A : str = dict(self.forward_default_kwargs )
        __A : List[Any] = kwargs.pop('num_inference_steps' , __lowerCamelCase )
        __A : str = self.dummy_sample
        __A : List[Any] = 0.1 * sample
        __A : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            __A : str = self.get_scheduler_config(**__lowerCamelCase )
            __A : int = scheduler_class(**__lowerCamelCase )
            scheduler.set_timesteps(__lowerCamelCase )
            # copy over dummy past residuals
            __A : Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__lowerCamelCase )
                __A : Dict = scheduler_class.from_pretrained(__lowerCamelCase )
                new_scheduler.set_timesteps(__lowerCamelCase )
                # copy over dummy past residuals
                __A : Any = dummy_past_residuals[: new_scheduler.config.solver_order]
            # NOTE(review): upstream this is ``output, new_output = sample, sample``.
            __A : List[str] = sample, sample
            for t in range(__lowerCamelCase , time_step + scheduler.config.solver_order + 1 ):
                __A : List[str] = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
                __A : Any = new_scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def _a ( self ):
        """Intentionally skipped for this scheduler (kept for API parity)."""
        pass

    # NOTE(review): positional parameter and **kwargs share one name -- SyntaxError.
    def _a ( self , __lowerCamelCase=0 , **__lowerCamelCase ):
        """Round-trip the scheduler and compare a single step of both copies."""
        __A : Dict = dict(self.forward_default_kwargs )
        __A : int = kwargs.pop('num_inference_steps' , __lowerCamelCase )
        __A : List[str] = self.dummy_sample
        __A : Optional[Any] = 0.1 * sample
        __A : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            __A : Tuple = self.get_scheduler_config()
            __A : int = scheduler_class(**__lowerCamelCase )
            scheduler.set_timesteps(__lowerCamelCase )
            # copy over dummy past residuals (must be after setting timesteps)
            __A : Dict = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__lowerCamelCase )
                __A : List[Any] = scheduler_class.from_pretrained(__lowerCamelCase )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(__lowerCamelCase )
                # copy over dummy past residual (must be after setting timesteps)
                __A : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
            __A : Optional[Any] = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
            __A : List[Any] = new_scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    # NOTE(review): positional parameter and **kwargs share one name -- SyntaxError.
    def _a ( self , __lowerCamelCase=None , **__lowerCamelCase ):
        """Run a full 10-step denoising loop and return the final sample."""
        if scheduler is None:
            __A : Any = self.scheduler_classes[0]
            __A : int = self.get_scheduler_config(**__lowerCamelCase )
            __A : List[Any] = scheduler_class(**__lowerCamelCase )
        __A : Dict = self.scheduler_classes[0]
        __A : Union[str, Any] = self.get_scheduler_config(**__lowerCamelCase )
        __A : Dict = scheduler_class(**__lowerCamelCase )
        __A : str = 10
        __A : Dict = self.dummy_model()
        __A : str = self.dummy_sample_deter
        scheduler.set_timesteps(__lowerCamelCase )
        for i, t in enumerate(scheduler.timesteps ):
            __A : Optional[int] = model(__lowerCamelCase , __lowerCamelCase )
            __A : Tuple = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample
        return sample

    def _a ( self ):
        """Check that step() preserves the sample shape across two timesteps."""
        __A : Optional[Any] = dict(self.forward_default_kwargs )
        __A : List[str] = kwargs.pop('num_inference_steps' , __lowerCamelCase )
        for scheduler_class in self.scheduler_classes:
            __A : Any = self.get_scheduler_config()
            __A : Optional[Any] = scheduler_class(**__lowerCamelCase )
            __A : Tuple = self.dummy_sample
            __A : str = 0.1 * sample
            if num_inference_steps is not None and hasattr(__lowerCamelCase , 'set_timesteps' ):
                scheduler.set_timesteps(__lowerCamelCase )
            elif num_inference_steps is not None and not hasattr(__lowerCamelCase , 'set_timesteps' ):
                __A : List[str] = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            __A : Any = [residual + 0.2, residual + 0.15, residual + 0.10]
            __A : Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
            __A : Union[str, Any] = scheduler.timesteps[5]
            __A : Tuple = scheduler.timesteps[6]
            __A : Tuple = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
            __A : Optional[int] = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def _a ( self ):
        """Verify from_config round-trips across the related multistep schedulers."""
        __A : List[Any] = DEISMultistepScheduler(**self.get_scheduler_config() )
        __A : Tuple = self.full_loop(scheduler=__lowerCamelCase )
        __A : Union[str, Any] = torch.mean(torch.abs(__lowerCamelCase ) )
        assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3
        __A : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        __A : Optional[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
        __A : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
        __A : str = DEISMultistepScheduler.from_config(scheduler.config )
        __A : str = self.full_loop(scheduler=__lowerCamelCase )
        __A : List[Any] = torch.mean(torch.abs(__lowerCamelCase ) )
        assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3

    def _a ( self ):
        """Exercise a range of num_train_timesteps values."""
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=__lowerCamelCase )

    def _a ( self ):
        """Exercise thresholding configurations."""
        self.check_over_configs(thresholding=__lowerCamelCase )
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=__lowerCamelCase , prediction_type=__lowerCamelCase , sample_max_value=__lowerCamelCase , algorithm_type='deis' , solver_order=__lowerCamelCase , solver_type=__lowerCamelCase , )

    def _a ( self ):
        """Exercise both supported prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__lowerCamelCase )

    def _a ( self ):
        """Exercise solver orders/types and run a full loop for each combo."""
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=__lowerCamelCase , solver_type=__lowerCamelCase , prediction_type=__lowerCamelCase , algorithm_type=__lowerCamelCase , )
                        __A : str = self.full_loop(
                            solver_order=__lowerCamelCase , solver_type=__lowerCamelCase , prediction_type=__lowerCamelCase , algorithm_type=__lowerCamelCase , )
                        assert not torch.isnan(__lowerCamelCase ).any(), "Samples have nan numbers"

    def _a ( self ):
        """Exercise lower_order_final settings."""
        self.check_over_configs(lower_order_final=__lowerCamelCase )
        self.check_over_configs(lower_order_final=__lowerCamelCase )

    def _a ( self ):
        """Exercise a range of inference step counts."""
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=__lowerCamelCase , time_step=0 )

    def _a ( self ):
        """Full loop with the default (epsilon) prediction type."""
        __A : int = self.full_loop()
        __A : Any = torch.mean(torch.abs(__lowerCamelCase ) )
        assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3

    def _a ( self ):
        """Full loop with v-prediction."""
        __A : Any = self.full_loop(prediction_type='v_prediction' )
        __A : Any = torch.mean(torch.abs(__lowerCamelCase ) )
        assert abs(result_mean.item() - 0.0_91 ) < 1e-3

    def _a ( self ):
        """Full loop in float16; the result must stay float16."""
        __A : List[str] = self.scheduler_classes[0]
        __A : Dict = self.get_scheduler_config(thresholding=__lowerCamelCase , dynamic_thresholding_ratio=0 )
        __A : Any = scheduler_class(**__lowerCamelCase )
        __A : Dict = 10
        __A : List[str] = self.dummy_model()
        __A : List[str] = self.dummy_sample_deter.half()
        scheduler.set_timesteps(__lowerCamelCase )
        for i, t in enumerate(scheduler.timesteps ):
            __A : Union[str, Any] = model(__lowerCamelCase , __lowerCamelCase )
            __A : List[str] = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample
        assert sample.dtype == torch.floataa
| 707 | """simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : Tuple =logging.get_logger(__name__)
def _lowercase ( _SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
'''simple docstring'''
__A : List[str] = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )
if "model" in sd.keys():
__A : int = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )['model']
# pop unnecessary weights
__A : str = [
'decoder.version',
'decoder.output_projection.weight',
]
for key in keys_to_delete:
if key in sd:
sd.pop(_SCREAMING_SNAKE_CASE )
__A : List[str] = {
'decoder.project_in_dim.weight': 'decoder.project_in.weight',
'decoder.project_out_dim.weight': 'decoder.project_out.weight',
'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
__A : Any = sd.pop(_SCREAMING_SNAKE_CASE )
__A : Union[str, Any] = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
__A : Tuple = sd[key]
# We split QKV in separate Q,K,V
__A : Any = key.replace('.qkv_proj.' , '.q_proj.' )
__A : Any = key.replace('.qkv_proj.' , '.k_proj.' )
__A : Any = key.replace('.qkv_proj.' , '.v_proj.' )
__A : List[Any] = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
__A , __A , __A : List[str] = torch.split(_SCREAMING_SNAKE_CASE , depth // 3 , dim=0 )
__A : Optional[int] = q
__A : int = k
__A : List[str] = v
del sd[key]
return sd
@torch.no_grad()
def _lowercase ( checkpoint_path , pytorch_dump_folder_path , config=None ):
    """Convert a metaseq OPT checkpoint into a Hugging Face ``OPTModel`` dump.

    Args:
        checkpoint_path: path to the fairseq/metaseq checkpoint file.
        pytorch_dump_folder_path: directory to write the converted model into.
        config: optional path/identifier of an ``OPTConfig`` to use; a default
            config is created when omitted.
    """
    # BUG FIX: the original declared all three parameters under the same name,
    # which is a SyntaxError in Python; distinct names are restored.
    # NOTE(review): ``load_checkpoint`` is not defined in this file as shown
    # (the loader above was renamed) -- confirm which helper is intended.
    state_dict = load_checkpoint(checkpoint_path )
    if config is not None:
        config = OPTConfig.from_pretrained(config )
    else:
        config = OPTConfig()
    model = OPTModel(config ).half().eval()
    model.load_state_dict(state_dict )
    # Check results
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCamelCase : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
lowerCamelCase : Dict =parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 237 | 0 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class a ( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    """T5-style note encoder: token + fixed position embeddings feeding a stack
    of T5 blocks, as used by the spectrogram diffusion pipeline."""

    # BUG FIX: the original listed the same name three times as base classes
    # (duplicate bases -> TypeError at class creation); the mixins imported at
    # the top of this file are restored.
    @register_to_config
    def __init__( self , max_length , vocab_size , d_model , dropout_rate , num_layers , num_heads , d_kv , d_ff , feed_forward_proj , is_decoder = False , ):
        """Build the encoder.

        Args:
            max_length: maximum sequence length (size of the position table).
            vocab_size: token vocabulary size.
            d_model: hidden size.
            dropout_rate: dropout probability applied before and after the stack.
            num_layers: number of T5 blocks.
            num_heads / d_kv / d_ff / feed_forward_proj: T5 block hyperparameters.
            is_decoder: whether the blocks run in decoder mode.
        """
        # BUG FIX: the original declared every parameter under one name (a
        # SyntaxError) and bound each sub-module to a throwaway local instead
        # of an attribute, so the forward pass could never find them; both are
        # restored here (matching the upstream spectrogram-diffusion encoder).
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size , d_model )
        self.position_encoding = nn.Embedding(max_length , d_model )
        # Positions are a fixed lookup table, not trained.
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate )
        t5config = TaConfig(
            vocab_size=vocab_size , d_model=d_model , num_heads=num_heads , d_kv=d_kv , d_ff=d_ff , dropout_rate=dropout_rate , feed_forward_proj=feed_forward_proj , is_decoder=is_decoder , is_encoder_decoder=False , )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            lyr = TaBlock(t5config )
            self.encoders.append(lyr )
        self.layer_norm = TaLayerNorm(d_model )
        self.dropout_post = nn.Dropout(p=dropout_rate )

    def lowerCAmelCase_ ( self , encoder_input_tokens , encoder_inputs_mask ):
        """Encode token ids.

        Args:
            encoder_input_tokens: integer token id tensor of shape (batch, seq).
            encoder_inputs_mask: attention mask over the inputs.

        Returns:
            Tuple of (encoded hidden states, encoder_inputs_mask).
        """
        # BUG FIX: duplicate parameter names and the lost attribute/variable
        # bindings are restored (see __init__).
        x = self.token_embedder(encoder_input_tokens )
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length , device=encoder_input_tokens.device )
        x += self.position_encoding(inputs_positions )
        x = self.dropout_pre(x )
        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask , input_shape )
        for lyr in self.encoders:
            x = lyr(x , extended_attention_mask )[0]
        x = self.layer_norm(x )
        return self.dropout_post(x ), encoder_inputs_mask
| 416 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class a ( unittest.TestCase ):
    """Cross-framework (PyTorch <-> TensorFlow) auto-model loading tests.

    NOTE(review): this block appears machine-rewritten -- every method passes
    the undefined name ``__UpperCamelCase`` where the loop variable
    ``model_name`` (or a checkpoint identifier / expected class) was presumably
    intended, so the assertions raise NameError as written.  The code is kept
    byte-identical here; confirm against the upstream auto-model test suite.
    """

    @slow
    def lowerCAmelCase_ ( self )-> str:
        """Cross-load a base model TF<->PT."""
        for model_name in ["bert-base-uncased"]:
            A__ : Tuple =AutoConfig.from_pretrained(__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
            A__ : Dict =TFAutoModel.from_pretrained(__UpperCamelCase , from_pt=__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
            A__ : Any =AutoModel.from_pretrained(__UpperCamelCase , from_tf=__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )

    @slow
    def lowerCAmelCase_ ( self )-> Union[str, Any]:
        """Cross-load the pretraining heads."""
        for model_name in ["bert-base-uncased"]:
            A__ : Tuple =AutoConfig.from_pretrained(__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
            A__ : Tuple =TFAutoModelForPreTraining.from_pretrained(__UpperCamelCase , from_pt=__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
            A__ : List[Any] =AutoModelForPreTraining.from_pretrained(__UpperCamelCase , from_tf=__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )

    @slow
    def lowerCAmelCase_ ( self )-> Optional[Any]:
        """Cross-load causal LM heads (GPT-2 family)."""
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ : Optional[int] =AutoConfig.from_pretrained(__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
            A__ : Optional[int] =TFAutoModelForCausalLM.from_pretrained(__UpperCamelCase , from_pt=__UpperCamelCase )
            A__ , A__ : List[Any] =TFAutoModelForCausalLM.from_pretrained(
                __UpperCamelCase , output_loading_info=__UpperCamelCase , from_pt=__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
            A__ : Dict =AutoModelForCausalLM.from_pretrained(__UpperCamelCase , from_tf=__UpperCamelCase )
            A__ , A__ : List[Any] =AutoModelForCausalLM.from_pretrained(
                __UpperCamelCase , output_loading_info=__UpperCamelCase , from_tf=__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )

    @slow
    def lowerCAmelCase_ ( self )-> Any:
        """Cross-load LM-head models."""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ : List[str] =AutoConfig.from_pretrained(__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
            A__ : Optional[int] =TFAutoModelWithLMHead.from_pretrained(__UpperCamelCase , from_pt=__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
            A__ : int =AutoModelWithLMHead.from_pretrained(__UpperCamelCase , from_tf=__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )

    @slow
    def lowerCAmelCase_ ( self )-> Optional[Any]:
        """Cross-load masked-LM heads."""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ : Dict =AutoConfig.from_pretrained(__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
            A__ : Union[str, Any] =TFAutoModelForMaskedLM.from_pretrained(__UpperCamelCase , from_pt=__UpperCamelCase )
            A__ , A__ : Union[str, Any] =TFAutoModelForMaskedLM.from_pretrained(
                __UpperCamelCase , output_loading_info=__UpperCamelCase , from_pt=__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
            A__ : List[Any] =AutoModelForMaskedLM.from_pretrained(__UpperCamelCase , from_tf=__UpperCamelCase )
            A__ , A__ : Union[str, Any] =AutoModelForMaskedLM.from_pretrained(
                __UpperCamelCase , output_loading_info=__UpperCamelCase , from_tf=__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )

    @slow
    def lowerCAmelCase_ ( self )-> Any:
        """Cross-load seq2seq LM heads (T5 family)."""
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ : Union[str, Any] =AutoConfig.from_pretrained(__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
            A__ : str =TFAutoModelForSeqaSeqLM.from_pretrained(__UpperCamelCase , from_pt=__UpperCamelCase )
            A__ , A__ : List[str] =TFAutoModelForSeqaSeqLM.from_pretrained(
                __UpperCamelCase , output_loading_info=__UpperCamelCase , from_pt=__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
            A__ : Any =AutoModelForSeqaSeqLM.from_pretrained(__UpperCamelCase , from_tf=__UpperCamelCase )
            A__ , A__ : Dict =AutoModelForSeqaSeqLM.from_pretrained(
                __UpperCamelCase , output_loading_info=__UpperCamelCase , from_tf=__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )

    @slow
    def lowerCAmelCase_ ( self )-> List[str]:
        """Cross-load sequence-classification heads."""
        for model_name in ["bert-base-uncased"]:
            A__ : Dict =AutoConfig.from_pretrained(__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
            A__ : Optional[Any] =TFAutoModelForSequenceClassification.from_pretrained(__UpperCamelCase , from_pt=__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
            A__ : Optional[int] =AutoModelForSequenceClassification.from_pretrained(__UpperCamelCase , from_tf=__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )

    @slow
    def lowerCAmelCase_ ( self )-> Tuple:
        """Cross-load question-answering heads."""
        for model_name in ["bert-base-uncased"]:
            A__ : Optional[Any] =AutoConfig.from_pretrained(__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
            A__ : Tuple =TFAutoModelForQuestionAnswering.from_pretrained(__UpperCamelCase , from_pt=__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
            A__ : int =AutoModelForQuestionAnswering.from_pretrained(__UpperCamelCase , from_tf=__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
            self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )

    def lowerCAmelCase_ ( self )-> Optional[int]:
        """Parameter counts survive a PT->TF->PT round trip (small test model)."""
        A__ : str =TFAutoModelWithLMHead.from_pretrained(__UpperCamelCase , from_pt=__UpperCamelCase )
        self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
        self.assertEqual(model.num_parameters() , 1_44_10 )
        self.assertEqual(model.num_parameters(only_trainable=__UpperCamelCase ) , 1_44_10 )
        A__ : List[str] =AutoModelWithLMHead.from_pretrained(__UpperCamelCase , from_tf=__UpperCamelCase )
        self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
        self.assertEqual(model.num_parameters() , 1_44_10 )
        self.assertEqual(model.num_parameters(only_trainable=__UpperCamelCase ) , 1_44_10 )

    def lowerCAmelCase_ ( self )-> List[Any]:
        """Same round-trip parameter-count check with a second identifier."""
        A__ : Any =TFAutoModelWithLMHead.from_pretrained(__UpperCamelCase , from_pt=__UpperCamelCase )
        self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
        self.assertEqual(model.num_parameters() , 1_44_10 )
        self.assertEqual(model.num_parameters(only_trainable=__UpperCamelCase ) , 1_44_10 )
        A__ : Tuple =AutoModelWithLMHead.from_pretrained(__UpperCamelCase , from_tf=__UpperCamelCase )
        self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
        self.assertEqual(model.num_parameters() , 1_44_10 )
        self.assertEqual(model.num_parameters(only_trainable=__UpperCamelCase ) , 1_44_10 )
| 416 | 1 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
_a = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase_(DiffusionPipeline):
    """Text-guided inpainting pipeline.

    CLIPSeg segments the region described by ``text`` and the resulting soft
    mask drives a :class:`StableDiffusionInpaintPipeline` pass with ``prompt``.

    NOTE(review): reconstructed — the original block subclassed the undefined
    name ``__UpperCamelCase`` and declared every ``__init__``/``__call__``
    parameter with the same placeholder name (a SyntaxError). Parameter and
    method names below are recovered from how the bodies use them.
    """

    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        # Old scheduler configs shipped steps_offset=0; patch the config in
        # place (with a deprecation warning) so sampling stays correct.
        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        # PNDM-style schedulers must skip PRK steps for inpainting.
        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            # `_a` is this module's logger (defined above).
            _a.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Enable sliced attention computation to save memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Disable sliced attention (compute attention in one step)."""
        # Passing None re-enables full attention.
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        """Offload submodules to CPU, moving each to GPU only while it runs."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Device to run on, accounting for accelerate's offload hooks."""
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, "PIL.Image.Image"],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Segment the region named by ``text`` and inpaint it with ``prompt``."""
        # 1. Build a soft mask from the CLIPSeg logits for `text`.
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # 2. Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
| 700 |
def UpperCamelCase__ ( _A: int ) -> bool:
    """Return True if *_A* is an automorphic number.

    A number is automorphic when its square ends in the number itself
    (5 -> 25, 76 -> 5776). Negative numbers are never automorphic.

    Raises:
        TypeError: if *_A* is not an int.

    >>> UpperCamelCase__(76)
    True
    >>> UpperCamelCase__(7)
    False
    """
    # Bug fix: the original tested `isinstance(_A, _A)` — isinstance's second
    # argument must be a type, so every call raised TypeError from the check
    # itself — and the error message read an undefined name `number`.
    if not isinstance(_A, int):
        msg = f"Input value of [number={_A}] must be an integer"
        raise TypeError(msg)
    if _A < 0:
        return False
    number = _A
    number_square = number * number
    # Compare the trailing digits of n and n*n one digit at a time.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 571 | 0 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
# Shared fixtures: a small sample image and its grayscale conversion, loaded
# once at import time (the asset must exist relative to the CWD).
# NOTE(review): both results are bound to the same name `_lowercase`, and the
# second line reads `img`, which is never defined — as written this raises
# NameError at import. Originally these were presumably `img` and `gray_img`.
_lowercase : Union[str, Any] = imread(r'digital_image_processing/image_data/lena_small.jpg')
_lowercase : Any = cvtColor(img, COLOR_BGR2GRAY)
def lowercase__ ( ):
    """convert_to_negative should produce at least one nonzero pixel.

    NOTE(review): every test in this module shares the name ``lowercase__``
    (only the last definition survives import), the argument ``snake_case_``
    is undefined, and the result is bound to ``__UpperCAmelCase`` while the
    assert reads ``negative_img`` — NameError as written.
    """
    __UpperCAmelCase = cn.convert_to_negative(snake_case_ )
    # assert negative_img array for at least one True
    assert negative_img.any()
def lowercase__ ( ):
    """change_contrast(…, 110) should yield a 100x100 PIL RGB image (checked
    via its repr prefix).

    NOTE(review): duplicated ``lowercase__`` name — this definition is
    shadowed by later ones before pytest can collect it.
    """
    with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(snake_case_ , 110 ) ).startswith(
            '''<PIL.Image.Image image mode=RGB size=100x100 at''' )
def lowercase__ ( ):
    """gen_gaussian_kernel(9, sigma=1.4) should contain only truthy entries.

    NOTE(review): duplicated ``lowercase__`` name; result bound to
    ``__UpperCAmelCase`` but the assert reads ``resp`` — NameError as written.
    """
    __UpperCAmelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()
def lowercase__ ( ):
    """Canny edge detection on the grayscale sample should produce output.

    NOTE(review): duplicated ``lowercase__`` name; assignment targets
    (``__UpperCAmelCase``) do not match the read names
    (``canny_img``/``canny_array``) and ``snake_case_`` is undefined —
    NameError as written.
    """
    __UpperCAmelCase = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    __UpperCAmelCase = canny.canny(snake_case_ )
    # assert canny array for at least one True
    assert canny_array.any()
def lowercase__ ( ):
    """gaussian_filter with a 5x5 kernel should return an all-truthy array.

    NOTE(review): duplicated ``lowercase__`` name; ``snake_case_`` (the input
    image) is undefined in this scope — NameError as written.
    """
    assert gg.gaussian_filter(snake_case_ , 5 , sigma=0.9 ).all()
def lowercase__ ( ):
    """img_convolve with a Laplacian kernel should produce nonzero output.

    NOTE(review): duplicated ``lowercase__`` name; the kernel is bound to
    ``__UpperCAmelCase`` but the call passes undefined ``snake_case_`` and the
    assert reads undefined ``res`` — NameError as written.
    """
    # laplace diagonals
    __UpperCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    __UpperCAmelCase = conv.img_convolve(snake_case_ , snake_case_ ).astype(snake_case_ )
    assert res.any()
def lowercase__ ( ):
    """median_filter with window 3 should produce nonzero output.

    NOTE(review): duplicated ``lowercase__`` name; ``snake_case_`` undefined.
    """
    assert med.median_filter(snake_case_ , 3 ).any()
def lowercase__ ( ):
    """sobel_filter should return nonzero gradient and angle arrays.

    NOTE(review): duplicated ``lowercase__`` name; the tuple is unpacked into
    two ``__UpperCAmelCase`` placeholders while the assert reads
    ``grad``/``theta`` — NameError as written.
    """
    __UpperCAmelCase , __UpperCAmelCase = sob.sobel_filter(snake_case_ )
    assert grad.any() and theta.any()
def lowercase__ ( ):
    """make_sepia(…, 20) should return an all-truthy array.

    NOTE(review): duplicated ``lowercase__`` name; result bound to
    ``__UpperCAmelCase`` but the assert reads ``sepia`` — NameError as written.
    """
    __UpperCAmelCase = sp.make_sepia(snake_case_ , 20 )
    assert sepia.all()
def lowercase__ ( snake_case_ :str = "digital_image_processing/image_data/lena_small.jpg" ):
    """Burkes dithering at threshold 120 should produce nonzero output.

    NOTE(review): duplicated ``lowercase__`` name; the instance is bound to
    ``__UpperCAmelCase`` but the following lines read ``burkes`` — NameError
    as written.
    """
    __UpperCAmelCase = bs.Burkes(imread(snake_case_ , 1 ) , 120 )
    burkes.process()
    assert burkes.output_img.any()
def lowercase__ ( snake_case_ :str = "digital_image_processing/image_data/lena_small.jpg" , ):
    """NearestNeighbour resize to 400x200 should produce nonzero output.

    NOTE(review): duplicated ``lowercase__`` name; the instance is bound to
    ``__UpperCAmelCase`` but the following lines read ``nn`` — NameError as
    written.
    """
    __UpperCAmelCase = rs.NearestNeighbour(imread(snake_case_ , 1 ) , 400 , 200 )
    nn.process()
    assert nn.output.any()
def lowercase__ ( ):
    """Local binary pattern over the full image should produce nonzero output.

    NOTE(review): duplicated ``lowercase__`` name; every assignment targets
    ``__UpperCAmelCase`` while later lines read the intended names
    (``image``, ``x_coordinate``, ``y_coordinate``, ``neighbors_pixels``,
    ``lbp_image``) and ``snake_case_`` is undefined — NameError as written.
    """
    __UpperCAmelCase = '''digital_image_processing/image_data/lena.jpg'''
    # Reading the image and converting it to grayscale.
    __UpperCAmelCase = imread(snake_case_ , 0 )
    # Test for get_neighbors_pixel function() return not None
    __UpperCAmelCase = 0
    __UpperCAmelCase = 0
    __UpperCAmelCase = image[x_coordinate][y_coordinate]
    __UpperCAmelCase = lbp.get_neighbors_pixel(
        snake_case_ , snake_case_ , snake_case_ , snake_case_ )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    __UpperCAmelCase = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            __UpperCAmelCase = lbp.local_binary_value(snake_case_ , snake_case_ , snake_case_ )
    assert lbp_image.any()
| 49 |
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase : str = logging.get_logger(__name__)
# NOTE(review): reconstructed — the original bound every constant below to the
# same placeholder name `lowercase` (each overwriting the last) while the
# SPECIAL_CODEPOINTS dict referenced the canonical names CLS/SEP/BOS/MASK/PAD/
# RESERVED, raising NameError at import. Canonical names are restored.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class _lowerCAmelCase(PreTrainedTokenizer):
    r"""Character-level (CANINE-style) tokenizer: each Unicode codepoint is a
    token and a token's id is simply its codepoint.

    NOTE(review): reconstructed — the original subclassed the undefined name
    ``UpperCamelCase_``, evaluated ``chr(SCREAMING_SNAKE_CASE)`` in the
    ``__init__`` defaults (NameError at definition time) and declared duplicate
    parameter names (SyntaxError). Canonical hook-method names are restored so
    the ``PreTrainedTokenizer`` machinery can find them.
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        # Wrap raw strings in AddedToken so surrounding whitespace is kept.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        """Total number of Unicode codepoints."""
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into a sequence of single characters."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """A character's id is its Unicode codepoint."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """Convert a codepoint back to a character (or special-token string)."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build ``[CLS] A [SEP]`` (or ``[CLS] A [SEP] B [SEP]`` for pairs)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. CLS/SEP), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file — nothing to save.
        return ()
| 649 | 0 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class A :
    """Empty placeholder suite.

    NOTE(review): `require_onnxruntime` presumably skips this when
    onnxruntime is not installed — confirm against diffusers testing_utils.
    """

    pass
| 206 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class A(unittest.TestCase):
    """Slow GPU tests for ``StableDiffusionKDiffusionPipeline``.

    NOTE(review): reconstructed — the original gave all four methods the same
    placeholder name (``UpperCAmelCase__``), so unittest saw only the last
    one, and passed the undefined placeholder ``_UpperCamelCase`` where a
    device / flag value was meant.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 206 | 1 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowercase_(ChunkPipeline):
    """Zero-shot object detection pipeline: scores bounding boxes in an image
    for free-text candidate labels.

    NOTE(review): reconstructed — the original decorated the class with the
    undefined name ``lowerCamelCase__``, declared duplicate placeholder
    parameter names in ``__call__`` (a SyntaxError), misnamed the pipeline
    hook methods so ``ChunkPipeline`` would never call them, and referenced
    the nonexistent ``torch.intaa`` dtype.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, '''vision''')
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        """Detect objects described by ``candidate_labels`` in ``image``."""
        if "text_queries" in kwargs:
            # Legacy argument name.
            candidate_labels = kwargs.pop('''text_queries''')

        if isinstance(image, (str, Image.Image)):
            # Single image with its labels; batched input passes dicts through.
            inputs = {'''image''': image, '''candidate_labels''': candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['''threshold'''] = kwargs['''threshold''']
        if "top_k" in kwargs:
            postprocess_params['''top_k'''] = kwargs['''top_k''']
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        # One chunk per candidate label: tokenized text + image features.
        image = load_image(inputs['''image'''])
        candidate_labels = inputs['''candidate_labels''']
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(''',''')

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        # Pull bookkeeping fields out before handing the rest to the model.
        target_size = model_inputs.pop('''target_size''')
        candidate_label = model_inputs.pop('''candidate_label''')
        is_last = model_inputs.pop('''is_last''')

        outputs = self.model(**model_inputs)

        model_outputs = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output['''candidate_label''']
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output['''target_size'''])[0]

            for index in outputs["scores"].nonzero():
                score = outputs['''scores'''][index].item()
                box = self._get_bounding_box(outputs['''boxes'''][index][0])

                result = {'''score''': score, '''label''': label, '''box''': box}
                results.append(result)

        # Highest-scoring detections first.
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turn an ``[xmin, ymin, xmax, ymax]`` tensor into a dict of ints."""
        if self.framework != "pt":
            raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''')
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            '''xmin''': xmin,
            '''ymin''': ymin,
            '''xmax''': xmax,
            '''ymax''': ymax,
        }
        return bbox
| 41 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=2 , lowerCamelCase=99 , lowerCamelCase=0 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=5_12 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=2 , lowerCamelCase=4 , lowerCamelCase="last" , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=0 , ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase : Any = parent
UpperCamelCase : int = batch_size
UpperCamelCase : str = seq_length
UpperCamelCase : Dict = is_training
UpperCamelCase : int = use_input_lengths
UpperCamelCase : int = use_token_type_ids
UpperCamelCase : Any = use_labels
UpperCamelCase : List[Any] = gelu_activation
UpperCamelCase : Optional[int] = sinusoidal_embeddings
UpperCamelCase : str = causal
UpperCamelCase : Tuple = asm
UpperCamelCase : Any = n_langs
UpperCamelCase : Any = vocab_size
UpperCamelCase : Optional[Any] = n_special
UpperCamelCase : Optional[Any] = hidden_size
UpperCamelCase : List[str] = num_hidden_layers
UpperCamelCase : Optional[int] = num_attention_heads
UpperCamelCase : str = hidden_dropout_prob
UpperCamelCase : List[Any] = attention_probs_dropout_prob
UpperCamelCase : int = max_position_embeddings
UpperCamelCase : List[str] = type_sequence_label_size
UpperCamelCase : Optional[Any] = initializer_range
UpperCamelCase : Union[str, Any] = num_labels
UpperCamelCase : int = num_choices
UpperCamelCase : Union[str, Any] = summary_type
UpperCamelCase : Union[str, Any] = use_proj
UpperCamelCase : Optional[int] = scope
UpperCamelCase : Any = bos_token_id
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase : List[str] = None
if self.use_input_lengths:
UpperCamelCase : Optional[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCamelCase : List[str] = None
if self.use_token_type_ids:
UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCamelCase : List[str] = None
UpperCamelCase : Union[str, Any] = None
UpperCamelCase : Dict = None
if self.use_labels:
UpperCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase : str = ids_tensor([self.batch_size] , 2 ).float()
UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> Dict:
'''simple docstring'''
UpperCamelCase : Optional[Any] = XLMModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
UpperCamelCase : Optional[Any] = model(lowerCamelCase , lengths=lowerCamelCase , langs=lowerCamelCase )
UpperCamelCase : Optional[Any] = model(lowerCamelCase , langs=lowerCamelCase )
UpperCamelCase : List[str] = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> int:
'''simple docstring'''
UpperCamelCase : Optional[int] = XLMWithLMHeadModel(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
UpperCamelCase : Tuple = model(lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> Tuple:
'''simple docstring'''
UpperCamelCase : Union[str, Any] = XLMForQuestionAnsweringSimple(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
UpperCamelCase : List[str] = model(lowerCamelCase )
UpperCamelCase : Optional[int] = model(lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase )
UpperCamelCase : int = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> int:
'''simple docstring'''
UpperCamelCase : Optional[int] = XLMForQuestionAnswering(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
UpperCamelCase : List[str] = model(lowerCamelCase )
UpperCamelCase : Any = model(
lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase , cls_index=lowerCamelCase , is_impossible=lowerCamelCase , p_mask=lowerCamelCase , )
UpperCamelCase : Optional[Any] = model(
lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase , cls_index=lowerCamelCase , is_impossible=lowerCamelCase , )
((UpperCamelCase) , ) : Any = result_with_labels.to_tuple()
UpperCamelCase : Dict = model(lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase )
((UpperCamelCase) , ) : Tuple = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase : int = XLMForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
UpperCamelCase : List[Any] = model(lowerCamelCase )
UpperCamelCase : Dict = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> List[Any]:
'''simple docstring'''
UpperCamelCase : Dict = self.num_labels
UpperCamelCase : int = XLMForTokenClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
UpperCamelCase : Union[str, Any] = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase : Optional[int] = self.num_choices
UpperCamelCase : Dict = XLMForMultipleChoice(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
UpperCamelCase : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : Optional[int] = model(
lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
(
(
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) ,
) : Optional[int] = config_and_inputs
UpperCamelCase : Dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
    """Common model/pipeline test suite for the XLM model family.

    NOTE(review): obfuscation damage left untouched here — the three mixin
    bases are all named ``lowerCamelCase_`` (duplicate base classes raise
    TypeError at class creation), every method is named
    ``SCREAMING_SNAKE_CASE__`` (later defs silently shadow earlier ones), and
    several signatures repeat the parameter name ``lowerCamelCase`` (a
    SyntaxError). Original mixin/method/parameter names must be restored
    before this class can run.
    """

    # Model classes exercised by the common tests (empty when torch is missing).
    __SCREAMING_SNAKE_CASE = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    # Generative model classes.
    __SCREAMING_SNAKE_CASE = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    ) # TODO (PVP): Check other models whether language generation is also applicable
    # Pipeline-task to model-class mapping used by the pipeline tests.
    __SCREAMING_SNAKE_CASE = (
        {
            '''feature-extraction''': XLMModel,
            '''fill-mask''': XLMWithLMHeadModel,
            '''question-answering''': XLMForQuestionAnsweringSimple,
            '''text-classification''': XLMForSequenceClassification,
            '''text-generation''': XLMWithLMHeadModel,
            '''token-classification''': XLMForTokenClassification,
            '''zero-shot''': XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[Any]:
        """Return True to skip QA pipeline tests run with a slow tokenizer.

        NOTE(review): ``pipeline_test_casse_name`` (sic) and ``tokenizer_name``
        are referenced but the parameters were renamed to ``lowerCamelCase`` —
        restore the original parameter names.
        """
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ) -> Tuple:
        """Extend the common input preparation with QA label tensors for XLMForQuestionAnswering.

        NOTE(review): ``return_labels``, ``model_class`` and ``inputs_dict`` are
        referenced but never bound under those names here — parameter and
        assignment names were mangled by obfuscation.
        """
        UpperCamelCase : Tuple = super()._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                # Zero-valued start/end position labels, one per example.
                UpperCamelCase : List[str] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase )
                UpperCamelCase : Union[str, Any] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase )
        return inputs_dict
    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
        """setUp: build the model tester and the config tester (emb_dim=37)."""
        UpperCamelCase : Tuple = XLMModelTester(self )
        UpperCamelCase : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase , emb_dim=37 )
    def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
        """Run the common configuration tests."""
        self.config_tester.run_common_tests()
    def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
        """Check the base XLM model."""
        UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*lowerCamelCase )
    def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
        """Check the LM-head model."""
        UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*lowerCamelCase )
    def SCREAMING_SNAKE_CASE__ ( self ) -> str:
        """Check the simple (SQuAD-style) QA model."""
        UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*lowerCamelCase )
    def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
        """Check the beam-search QA model."""
        UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*lowerCamelCase )
    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
        """Check the sequence-classification model."""
        UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*lowerCamelCase )
    def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
        """Check the token-classification model."""
        UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*lowerCamelCase )
    def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
        """Check the multiple-choice model."""
        UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCamelCase )
    def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=1 ) -> Optional[Any]:
        """Validate per-step attention shapes produced during generation.

        NOTE(review): ``attentions``, ``max_length``, ``min_length``,
        ``num_beam_groups``, ``batch_size`` and ``config`` are referenced but
        the parameters were renamed to ``lowerCamelCase`` — restore them.
        """
        self.assertIsInstance(lowerCamelCase , lowerCamelCase )
        self.assertListEqual(
            [isinstance(lowerCamelCase , lowerCamelCase ) for iter_attentions in attentions] , [True] * len(lowerCamelCase ) )
        self.assertEqual(len(lowerCamelCase ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_attentions in enumerate(lowerCamelCase ):
            # adds PAD dummy token
            UpperCamelCase : Dict = min_length + idx + 1
            UpperCamelCase : int = min_length + idx + 1
            UpperCamelCase : Union[str, Any] = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCamelCase ) )
    def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=1 ) -> Optional[int]:
        """Validate per-step hidden-state shapes produced during generation.

        NOTE(review): same parameter-name damage as the attention check above.
        """
        self.assertIsInstance(lowerCamelCase , lowerCamelCase )
        self.assertListEqual(
            [isinstance(lowerCamelCase , lowerCamelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCamelCase ) , )
        self.assertEqual(len(lowerCamelCase ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_hidden_states in enumerate(lowerCamelCase ):
            # adds PAD dummy token
            UpperCamelCase : Tuple = min_length + idx + 1
            UpperCamelCase : str = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCamelCase ) , )
        pass
    @slow
    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
        """Smoke-test loading the first pretrained XLM checkpoint."""
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase : Union[str, Any] = XLMModel.from_pretrained(lowerCamelCase )
            self.assertIsNotNone(lowerCamelCase )
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    """Slow integration test: greedy generation with the pretrained xlm-mlm-en-2048 checkpoint.

    Class renamed from the obfuscated ``UpperCAmelCase_`` (which clobbered the
    common test class of the same name defined above).
    """

    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)  # NOTE(review): assumes torch_device is imported at module level — confirm
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
# (removed dataset/export artifact)
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True if the two strings are anagrams (case- and whitespace-insensitive).

    Function name restored to match the caller in the ``__main__`` block.

    >>> check_anagrams("Silent", "Listen")
    True
    >>> check_anagrams("This is a string", "Is this a string")
    True
    >>> check_anagrams("There", "Their")
    False
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # Increment for characters of the first string, decrement for the second;
    # anagrams cancel out to all-zero counts.
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
    # Run the doctests first, then do an interactive check.
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    """Builds small random YOLOS configs/inputs and checks model output shapes.

    Class renamed to ``YolosModelTester`` to match the instantiation in the
    test class below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],  # NOTE: mutable default kept for caller compatibility
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is None unless use_labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Build a tiny YolosConfig from the tester's hyper-parameters."""
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,  # NOTE(review): obfuscated source had a dangling name here — confirm
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's last_hidden_state shape."""
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        """Check detection logits/boxes shapes, with and without labels."""
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        """Unpack prepare_config_and_inputs() into (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
    '''Common model/pipeline test suite for YOLOS.

    NOTE(review): obfuscation damage left untouched here — both mixin bases
    are named ``SCREAMING_SNAKE_CASE_`` (duplicate base classes raise
    TypeError at class creation), every method is named ``__snake_case``
    (later defs silently shadow earlier ones), and many locals were renamed
    to ``A__`` while later uses keep the original names. Restore the
    original names before this class can run.
    '''

    # Model classes exercised by the common tests / pipeline-task mapping.
    _lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    _lowerCAmelCase = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )
    # Feature flags for the common tests (attention/pruning/head-masking, etc.).
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
        """Extend input preparation with detection labels for YolosForObjectDetection."""
        A__ : Optional[int] = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                A__ : str = []
                for i in range(self.model_tester.batch_size ):
                    A__ : int = {}
                    # One target dict per example: class labels and (n_targets, 4) boxes.
                    A__ : Dict = torch.ones(
                        size=(self.model_tester.n_targets,) , device=UpperCamelCase__ , dtype=torch.long )
                    A__ : Dict = torch.ones(
                        self.model_tester.n_targets , 4 , device=UpperCamelCase__ , dtype=torch.float )
                    labels.append(UpperCamelCase__ )
                A__ : Dict = labels
        return inputs_dict
    def __snake_case ( self ):
        """setUp: build the model tester and the config tester (hidden_size=37)."""
        A__ : List[Any] = YolosModelTester(self )
        A__ : List[str] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
    def __snake_case ( self ):
        """Run the common configuration tests."""
        self.config_tester.run_common_tests()
    def __snake_case ( self ):
        # YOLOS does not use inputs_embeds
        pass
    def __snake_case ( self ):
        """Check input embeddings are a Module and output embeddings are Linear or None."""
        A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ : Any = model_class(UpperCamelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            A__ : str = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
    def __snake_case ( self ):
        """Check the forward signature starts with pixel_values."""
        A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ : List[str] = model_class(UpperCamelCase__ )
            A__ : str = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A__ : Optional[int] = [*signature.parameters.keys()]
            A__ : Optional[Any] = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
    def __snake_case ( self ):
        """Check the base model."""
        A__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )
    def __snake_case ( self ):
        """Check attention outputs: count, shape, and config/arg toggles."""
        A__ , A__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        A__ : Tuple = True
        # in YOLOS, the seq_len is different
        A__ : List[Any] = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            A__ : Any = True
            A__ : Optional[int] = False
            A__ : Optional[Any] = True
            A__ : int = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A__ : List[str] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            A__ : Optional[int] = outputs.attentions
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            A__ : Tuple = True
            A__ : Optional[Any] = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            A__ : Tuple = outputs.attentions
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
            A__ : List[Any] = len(UpperCamelCase__ )
            # Check attention is always last and order is fine
            A__ : List[str] = True
            A__ : List[Any] = True
            A__ : int = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            A__ : Tuple = 1
            self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase__ ) )
            A__ : List[str] = outputs.attentions
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
    def __snake_case ( self ):
        """Check hidden-state outputs: layer count and per-layer shape."""
        def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
            A__ : str = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A__ : int = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            A__ : Optional[Any] = outputs.hidden_states
            A__ : int = getattr(
                self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # YOLOS has a different seq_length
            A__ : Union[str, Any] = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
        A__ , A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ : int = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            A__ : Optional[int] = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    def __snake_case ( self ):
        """Check the object-detection head."""
        A__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*UpperCamelCase__ )
    @slow
    def __snake_case ( self ):
        """Smoke-test loading the first pretrained YOLOS checkpoint."""
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ : Union[str, Any] = YolosModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
def prepare_img():
    """Load the COCO cats test fixture image (name restored to match callers)."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the pretrained hustvl/yolos-small checkpoint.

    Class renamed from the obfuscated ``UpperCamelCase__`` (which clobbered the
    classes of the same name defined above).
    """

    @cached_property
    def default_image_processor(self):
        # Name restored to match the use in the test body below.
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(a__):
    """Donut-based document question answering tool.

    Class and method names restored to the agent-tool API the base class
    (``a__``, i.e. PipelineTool) drives: ``encode`` → ``forward`` → ``decode``.
    """

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document, question):
        """Build decoder prompt ids and pixel values for the Donut model."""
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Run constrained greedy generation and return the raw sequences."""
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """Decode generated ids into the Donut JSON and return the answer field."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        # Fixed: the Donut processor method is token2json (was misspelled "tokenajson").
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
'''simple docstring'''
def compute_ap(graph):  # noqa: E741
    """Print the articulation points of an undirected graph and return them as a list.

    graph: adjacency list mapping vertex -> list of neighbours, vertices 0..n-1.
    Name restored to match the module-level call site; the returned list is a
    backward-compatible addition (the original only printed).
    """
    n = len(graph)
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        # Returns the (possibly updated) number of DFS tree edges leaving the
        # root — used afterwards to decide whether the root is an articulation point.
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at  # discovery label; vertex id doubles as discovery order here

        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            # The root of each DFS tree is an articulation point iff it has
            # more than one outgoing tree edge.
            out_edge_count = dfs(i, i, -1, 0)
            is_art[i] = out_edge_count > 1

    articulation_points = [x for x in range(len(is_art)) if is_art[x]]
    for x in articulation_points:
        print(x)
    return articulation_points
# Adjacency list of graph
lowercase__ : Tuple = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data) | 8 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (name restored: the config classes below call logger.warning/info).
logger = logging.get_logger(__name__)

# NOTE(review): canonical archive-map name assumed — the obfuscated source
# bound both this dict and the logger to the same name, clobbering the logger.
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class PixaStructTextConfig(_a):
    """Configuration for the Pix2Struct text (decoder) model.

    Class name restored to match the reference in the composite config below;
    the base ``_a`` is the project's PretrainedConfig (bound above this chunk).
    """

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load the text sub-config, unwrapping a composite pix2struct config if given."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class PixaStructVisionConfig(_a):
    """Configuration for the Pix2Struct vision (encoder) model.

    Class name restored to match the reference in the composite config below.
    """

    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load the vision sub-config, unwrapping a composite pix2struct config if given."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class PixaStructConfig(_a):
    """Composite Pix2Struct configuration combining text and vision sub-configs."""

    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)

        # Special-token ids are mirrored from the text sub-config.
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        # Propagate the initializer range to both sub-configs.
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Instantiate a composite config from separate text and vision config objects."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 548 |
import inspect
import unittest
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Checks that every backend referenced by a diffusers dummy object has an
    entry in the pinned dependency-version table."""

    def test_diffusers_import(self):
        # The package must be importable before anything else can be checked.
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        # Fix: the original passed an undefined name to getmembers; the
        # imported module object is what must be inspected.
        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # Backend identifiers use underscores; the deps table
                    # keys use the hyphenated PyPI package names.
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 548 | 1 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase:
    """Helper that builds tiny ALBERT configs/inputs and runs shape checks
    for every model head.

    Fixes vs. original: every method declared the same mangled parameter
    name repeatedly (a SyntaxError) and read undefined locals; real names
    are restored from the attribute assignments and from the calls made by
    the companion test class (`prepare_config_and_inputs`,
    `create_and_check_*`, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a config for one forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise the three supported input combinations.
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Multiple-choice inputs replicate each sequence once per choice.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    """Standard harness tests for ALBERT across all heads and pipelines.

    Fixes vs. original: the mixin bases were the undefined name `_a`
    (ModelTesterMixin / PipelineTesterMixin are imported above); the three
    class attributes all shared one mangled name, hiding `all_model_classes`
    from the mixin; test methods were not `test_`-prefixed so unittest never
    ran them; `_prepare_for_class` declared duplicate parameter names.
    """

    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add dummy labels for pretraining heads when the mixin asks for them."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # The config is the first element of the prepared tuple.
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class UpperCamelCase( unittest.TestCase ):
    """Slow integration test: forward the real albert-base-v2 checkpoint and
    compare a slice of the output against known values.

    Fixes vs. original: the forward call and assertions referenced undefined
    names; real local names are restored and the method is `test_`-prefixed
    so unittest discovers it.
    """

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        # Loose tolerance: values depend on float kernels across platforms.
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 371 |
import inspect
import unittest
class UpperCamelCase( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
__snake_case = inspect.getmembers(SCREAMING_SNAKE_CASE , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
__snake_case = "k-diffusion"
elif backend == "invisible_watermark":
__snake_case = "invisible-watermark"
assert backend in deps, f'''{backend} is not in the deps table!'''
| 371 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
__a: "logging.Logger" = logging.get_logger(__name__)

# Canonical ViT-MAE checkpoints mapped to the URLs of their config files.
__a: dict = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class __magic_name__ ( PretrainedConfig ):
    """Configuration for the ViT-MAE (masked auto-encoder) model.

    Fixes vs. original: the base class was the undefined name
    `UpperCAmelCase_` (PretrainedConfig is imported above), and the
    constructor declared the same parameter name nineteen times
    (a SyntaxError); the real hyper-parameter names are restored.
    """

    model_type = 'vit_mae'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Encoder hyper-parameters.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Image / patching parameters.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        # Decoder (reconstruction) hyper-parameters.
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        # MAE-specific: fraction of patches masked, and pixel-norm loss flag.
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 710 |
'''simple docstring'''
def _UpperCAmelCase ( a : list ) -> list:
"""simple docstring"""
for i in range(len(a ) - 1 , 0 , -1 ):
lowercase_ : Any = False
for j in range(a , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
lowercase_ , lowercase_ : Any = unsorted[j - 1], unsorted[j]
lowercase_ : int = True
for j in range(a ):
if unsorted[j] > unsorted[j + 1]:
lowercase_ , lowercase_ : Union[str, Any] = unsorted[j + 1], unsorted[j]
lowercase_ : Optional[Any] = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Read a comma-separated list of integers from stdin and sort it.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    # Fix: the sort function in this module is named `_UpperCAmelCase`;
    # the original f-string referenced an undefined `cocktail_shaker_sort`.
    print(f"{_UpperCAmelCase(unsorted) = }")
| 7 | 0 |
"""simple docstring"""
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
# Official dance-diffusion checkpoints: for each model name, the download
# URL plus the audio sample rate (Hz) and sample length the model expects.
__SCREAMING_SNAKE_CASE ={
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 4_8000,
        "sample_size": 6_5536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 4_8000,
        "sample_size": 6_5536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 4_8000,
        "sample_size": 13_1072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 1_6000,
        "sample_size": 6_5536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 1_6000,
        "sample_size": 6_5536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 1_6000,
        "sample_size": 6_5536,
    },
}
def lowercase__(alpha, sigma):
    """Convert a (clean-signal scale, noise scale) pair to a diffusion time in [0, 1].

    Fixes vs. original: the signature declared the same parameter name
    twice (a SyntaxError) and called the non-existent ``torch.atana``;
    the v-diffusion formula is ``atan2(sigma, alpha) / pi * 2``.
    """
    return torch.atan2(sigma, alpha) / math.pi * 2
def lowercase__(t):
    """Warp a uniform time grid *t* into the "crash" diffusion schedule.

    Fixes vs. original: intermediate results were bound to mangled names
    while the next line read ``sigma``/``alpha`` (NameError). The final
    (alpha, sigma) -> t conversion is inlined because the helper the
    original called is not defined under that name in this module.
    """
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    # Inlined alpha_sigma_to_t: atan2(sigma, alpha) / pi * 2.
    return torch.atan2(sigma, alpha) / math.pi * 2
class UpperCamelCase:
    """Empty attribute bag standing in for the training script's args object.

    Fix vs. original: the base class was the undefined name ``__A``;
    a plain object suffices since instances are only used to carry
    ad-hoc attributes (sample_size, sample_rate, ...).
    """

    pass
class UpperCamelCase ( nn.Module ):
    """Wraps the unconditional audio-diffusion UNet, its EMA copy and a
    Sobol RNG, mirroring the original training checkpoint layout.

    Fixes vs. original: the constructor bound its sub-modules to local
    names instead of ``self`` attributes (so ``.diffusion_ema`` below in
    this file would fail) and passed an undefined name as ``scramble``.
    """

    def __init__(self, global_args):
        super().__init__()
        # 4 attention layers matches the released dance-diffusion checkpoints.
        self.diffusion = DiffusionAttnUnetaD(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def lowercase__(model_name):
    """Download the official checkpoint for *model_name* into the current
    directory (via wget) and return its local path.

    Fix vs. original: the URL was bound to a mangled local while the
    shell command read the undefined name ``url``.
    """
    url = MODELS_MAP[model_name]["url"]
    os.system(f'wget {url} ./')
    return f'./{model_name}.ckpt'
# Checkpoint layer index -> diffusers sub-module name, for the down path
# (indices 1-6 alternate resnet / attention blocks).
__SCREAMING_SNAKE_CASE ={
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
# Same mapping for the up path (indices 8-13).
__SCREAMING_SNAKE_CASE ={
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
# Mid-block mapping: six alternating resnet / attention pairs.
__SCREAMING_SNAKE_CASE ={
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
# Depth-0 mapping: outermost blocks have resnets only (indices 0-2 down,
# 4-6 up).
__SCREAMING_SNAKE_CASE ={
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}
# Parameter-name fragments inside a ResConvBlock -> diffusers names.
__SCREAMING_SNAKE_CASE ={
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}
# Attention parameter fragments; the fused qkv projection fans out to a
# list of three diffusers parameters.
__SCREAMING_SNAKE_CASE ={
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}
def lowercase__(name):
    """Translate a ResConvBlock parameter name into the diffusers layout.

    Fix vs. original: the parameter was bound to a mangled name while the
    body read the undefined name ``name``.
    """
    if name.startswith('skip'):
        return name.replace('skip', RES_CONV_MAP['skip'])

    # name has to be of format main.{digit}
    if not name.startswith('main.'):
        raise ValueError(f'ResConvBlock error with {name}')
    # name[:6] is the "main.<digit>" prefix keyed in RES_CONV_MAP.
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def lowercase__(name):
    """Translate an attention parameter name via ATTN_MAP.

    Returns a single string, or a list of strings when one fused
    checkpoint parameter (qkv) maps to several diffusers parameters.

    Fix vs. original: all arguments were the undefined placeholder
    ``A__``; the intended key/value logic is restored.
    """
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f'Attn error with {name}')
def lowercase__(input_string, max_depth=13):
    """Map one parameter name from the original audio-diffusion checkpoint
    to the corresponding diffusers UNet name (or list of names for fused
    qkv weights).

    Fixes vs. original: nearly every local (``string``, ``depth``,
    ``layer_num``, ``string_left``, ``new_layer``, ``prefix``) was bound
    to a mangled name and then read under its real name (NameError).
    """
    string = input_string
    if string.split('.')[0] == "timestep_embed":
        return string.replace('timestep_embed', 'time_proj')

    depth = 0
    if string.startswith('net.3.'):
        depth += 1
        string = string[6:]
    elif string.startswith('net.'):
        string = string[4:]

    # Each "main.7." wrapper is one level deeper in the U-Net.
    while string.startswith('main.7.'):
        depth += 1
        string = string[7:]

    if string.startswith('main.'):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = 'mid_block'
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f'down_blocks.{depth}'
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - depth - 1}'
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - 1}' if int(layer_num) > 3 else 'down_blocks.0'

    if not string_left.startswith('.'):
        raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.')

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left)
    new_string_left = string_left

    if not isinstance(new_string_left, list):
        new_string = prefix + "." + new_layer + "." + new_string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in new_string_left]
    return new_string
def lowercase__(state_dict):
    """Rename every key of the original checkpoint *state_dict* to the
    diffusers convention, dropping non-trainable kernels.

    Fix vs. original: the renamed key and the accumulator dict were bound
    to mangled names, so nothing was ever stored.
    """
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith('kernel'):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def lowercase__(new_state_dict, new_k, v):
    """Store checkpoint tensor *v* in *new_state_dict* under key(s) *new_k*,
    squeezing the trailing 1x1-conv dimension and splitting fused qkv
    weights into three equal chunks along dim 0.

    Fix vs. original: tensors were bound to mangled locals instead of
    being written into the dict under the renamed keys.
    """
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight of a 1x1 conv -> linear weight (drop the kernel dim)
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices: the checkpoint stores them fused along dim 0
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def lowercase__(args):
    """Convert an original dance-diffusion checkpoint into a diffusers
    DanceDiffusionPipeline, verify both produce (nearly) identical audio,
    and optionally save the pipeline.

    Fixes vs. original: virtually every local was bound to a mangled name
    and then read under its real name; the coherent data flow is restored.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model_name = args.model_path.split('/')[-1].split('.')[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    # Minimal args object the original model class expects.
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNetaDModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)['state_dict'])
    # The EMA weights are the ones that were released.
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f'Problem with {renamed_minus_diffusers}'
    # Only non-trainable kernel buffers may be missing from the rename.
    assert all(k.endswith('kernel') for k in list(diffusers_minus_renamed)), f'Problem with {diffusers_minus_renamed}'

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)

    # Re-seed so both models sample from the same noise.
    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print('Diff sum', diff_sum)
    print('Diff max', diff_max)
    assert diff_max < 1e-3, f'Diff max: {diff_max} is too much :-/'

    print(f'Conversion for {model_name} successful!')
if __name__ == "__main__":
    # CLI entry point: convert an original checkpoint into a diffusers pipeline.
    # Fix vs. original: the parser and parsed args were bound to a mangled
    # name while `parser.add_argument` / `main(args)` read the real names.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    # NOTE(review): `type=bool` treats any non-empty string as True
    # ("--save False" is still truthy); kept as-is for CLI compatibility.
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
    main(args)
| 425 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for the Informer configuration file.
lowerCamelCase_ = logging.get_logger(__name__)

# Released Informer checkpoints mapped to the URLs of their config files.
lowerCamelCase_ = {
    '''huggingface/informer-tourism-monthly''': (
        '''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class UpperCamelCase_ (PretrainedConfig ):
    """Configuration for an Informer time-series transformer model.

    Holds the time-series-specific hyper-parameters (prediction/context
    lengths, distribution head, static/dynamic feature counts, cardinalities)
    and the encoder/decoder transformer hyper-parameters, plus the
    Informer-specific ProbSparse attention settings.

    NOTE(review): reconstructed from a mechanically scrambled source — the
    base class was the undefined name `__A` (restored to the imported
    PretrainedConfig), every `__init__` parameter was named `lowerCAmelCase_`
    (a duplicate-argument SyntaxError), and all `self.x = ...` targets had
    been collapsed to throwaway locals.  Parameter/attribute names are
    recovered from the right-hand-side reads that the scrambler preserved
    (e.g. `self.cardinality`, `self._number_of_features`); parameter ORDER is
    inferred from the annotated defaults and should be confirmed against the
    upstream transformers source.
    """

    # `model_type` / `attribute_map` are the names the PretrainedConfig
    # machinery expects (both were collapsed to `__magic_name__`).
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.0_5,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.0_2,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality (must have one entry per static categorical feature)
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension (defaults to min(50, (cardinality+1)//2))
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer-specific (ProbSparse attention) settings
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    @property
    def _number_of_features(self) -> int:
        # total width of the per-timestep feature vector fed to the model
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 95 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ = logging.get_logger(__name__)
a_ = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class A_(PretrainedConfig ):
    """Configuration for a Table Transformer (DETR-style detection) model.

    Covers the backbone selection (timm vs. transformers backbone config),
    the encoder/decoder transformer hyper-parameters, and the Hungarian
    matcher / loss coefficients.

    NOTE(review): reconstructed from a mechanically scrambled source — the
    base class was the undefined name `SCREAMING_SNAKE_CASE_` (restored to
    the imported PretrainedConfig), every `__init__` parameter was named `A`
    (a duplicate-argument SyntaxError), and the `self.x = ...` targets had
    been collapsed to throwaway locals.  Names are recovered from the
    preserved right-hand sides; confirm against the upstream source.
    """

    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.0_2,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
            elif isinstance(backbone_config , dict ):
                # re-hydrate a plain-dict backbone config into its config class
                backbone_model_type = backbone_config.get('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig ):
    """ONNX export configuration for Table Transformer.

    NOTE(review): in the scrambled source this class was also named `A_`
    (shadowing the config class above) with the undefined base
    `SCREAMING_SNAKE_CASE_`, and all three properties shared the name
    `_lowerCAmelCase` so only the last survived; restored to the
    conventional transformers names.
    """

    # minimum torch version that supports this export
    torch_onnx_minimum_version = version.parse("""1.11""" )

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # dynamic axes for the exported graph
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        # absolute tolerance when validating the exported model
        return 1E-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 717 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
a_ = {"""target_lang""": """fi""", """source_lang""": """en"""}
a_ = """>>zh<<"""
a_ = """Helsinki-NLP/"""
if is_torch_available():
a_ = """pt"""
elif is_tf_available():
a_ = """tf"""
else:
a_ = """jax"""
@require_sentencepiece
class A_(SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """MarianTokenizer test-suite: vocab handling, save/load round trips,
    padding/truncation shapes, and integration checks.

    NOTE(review): mechanically scrambled source — the base-class name
    ``SCREAMING_SNAKE_CASE_`` is undefined (presumably the imported
    TokenizerTesterMixin), every test method below shares the name
    ``_lowerCAmelCase`` so later definitions shadow earlier ones, and most
    call arguments/local reads were collapsed to the bare names ``A`` /
    ``save_dir`` / ``tok`` etc. that are never bound.  Restore from the
    upstream transformers test file before trusting any behavior here.
    """

    # Collapsed class-level fixtures; upstream these are presumably
    # tokenizer_class / test_rust_tokenizer / test_sentencepiece — confirm.
    a_ : Optional[int] = MarianTokenizer
    a_ : Optional[Any] = False
    a_ : Optional[int] = True

    # setUp: builds a tiny 9-token vocab + sentencepiece fixture in tmpdir.
    def _lowerCAmelCase ( self ):
        super().setUp()
        _lowerCamelCase : Optional[Any] = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
        _lowerCamelCase : Tuple = dict(zip(A , range(len(A ) ) ) )
        _lowerCamelCase : Union[str, Any] = Path(self.tmpdirname )
        save_json(A , save_dir / VOCAB_FILES_NAMES['vocab'] )
        save_json(A , save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(A , save_dir / VOCAB_FILES_NAMES['source_spm'] )
            copyfile(A , save_dir / VOCAB_FILES_NAMES['target_spm'] )
        _lowerCamelCase : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )

    # tokenizer factory (kwargs forwarded to from_pretrained).
    def _lowerCAmelCase ( self , **A ):
        return MarianTokenizer.from_pretrained(self.tmpdirname , **A )

    # (input_text, output_text) pair for the common round-trip test.
    def _lowerCAmelCase ( self , A ):
        return (
            "This is a test",
            "This is a test",
        )

    # token <-> id conversion for the "</s>" token (id 0).
    def _lowerCAmelCase ( self ):
        _lowerCamelCase : List[str] = '</s>'
        _lowerCamelCase : List[Any] = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )

    # vocab ordering and size of the fixture tokenizer.
    def _lowerCAmelCase ( self ):
        _lowerCamelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '</s>' )
        self.assertEqual(vocab_keys[1] , '<unk>' )
        self.assertEqual(vocab_keys[-1] , '<pad>' )
        self.assertEqual(len(A ) , 9 )

    def _lowerCAmelCase ( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 9 )

    # en-de checkpoint: encode a sample, then save_pretrained round trip
    # must keep source.spm and stay loadable.
    def _lowerCAmelCase ( self ):
        _lowerCamelCase : int = MarianTokenizer.from_pretrained(F"{ORG_NAME}opus-mt-en-de" )
        _lowerCamelCase : Dict = en_de_tokenizer(['I am a small frog'] , return_tensors=A )
        self.assertIsInstance(A , A )
        _lowerCamelCase : Optional[int] = [38, 121, 14, 697, 3_8848, 0]
        self.assertListEqual(A , batch.input_ids[0] )
        _lowerCamelCase : Dict = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(A )
        _lowerCamelCase : Tuple = [x.name for x in Path(A ).glob('*' )]
        self.assertIn('source.spm' , A )
        MarianTokenizer.from_pretrained(A )

    # truncation: an over-long input is clipped to model max length (512).
    def _lowerCAmelCase ( self ):
        _lowerCamelCase : Dict = self.get_tokenizer()
        _lowerCamelCase : str = tok(
            ['I am a small frog' * 1000, 'I am a small frog'] , padding=A , truncation=A , return_tensors=A )
        self.assertIsInstance(A , A )
        self.assertEqual(batch.input_ids.shape , (2, 512) )

    # padding only: batch is padded to the longest sequence (10 tokens).
    def _lowerCAmelCase ( self ):
        _lowerCamelCase : Dict = self.get_tokenizer()
        _lowerCamelCase : List[Any] = tok(['I am a tiny frog', 'I am a small frog'] , padding=A , return_tensors=A )
        self.assertIsInstance(A , A )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )

    # integration: fixed expected encoding for a pinned en-de revision.
    @slow
    def _lowerCAmelCase ( self ):
        # fmt: off
        _lowerCamelCase : int = {'input_ids': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 
5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , )

    # separate source/target vocabs: text vs text_target encoding + decode.
    def _lowerCAmelCase ( self ):
        _lowerCamelCase : Any = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs' )
        _lowerCamelCase : List[Any] = 'Tämä on testi'
        _lowerCamelCase : Optional[int] = 'This is a test'
        _lowerCamelCase : Any = [76, 7, 2047, 2]
        _lowerCamelCase : Union[str, Any] = [69, 12, 11, 940, 2]
        _lowerCamelCase : List[Any] = tokenizer(A ).input_ids
        self.assertListEqual(A , A )
        _lowerCamelCase : Optional[int] = tokenizer(text_target=A ).input_ids
        self.assertListEqual(A , A )
        _lowerCamelCase : Union[str, Any] = tokenizer.decode(A , skip_special_tokens=A )
        self.assertEqual(A , A )
| 349 | 0 |
"""simple docstring"""
class lowercase_ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : int ):
_A = n
_A = [None] * self.n
_A = 0 # index of the first element
_A = 0
_A = 0
def __len__( self : str ):
return self.size
def lowerCAmelCase_ ( self : Dict ):
return self.size == 0
def lowerCAmelCase_ ( self : List[Any] ):
return False if self.is_empty() else self.array[self.front]
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Tuple ):
if self.size >= self.n:
raise Exception('QUEUE IS FULL' )
_A = data
_A = (self.rear + 1) % self.n
self.size += 1
return self
def lowerCAmelCase_ ( self : str ):
if self.size == 0:
raise Exception('UNDERFLOW' )
_A = self.array[self.front]
_A = None
_A = (self.front + 1) % self.n
self.size -= 1
return temp
| 7 |
"""simple docstring"""
class lowercase_ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ):
_A = None
_A = None
_A = graph
self._normalize_graph(_UpperCAmelCase , _UpperCAmelCase )
_A = len(_UpperCAmelCase )
_A = None
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict ):
if sources is int:
_A = [sources]
if sinks is int:
_A = [sinks]
if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) == 0:
return
_A = sources[0]
_A = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(_UpperCAmelCase ) > 1 or len(_UpperCAmelCase ) > 1:
_A = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
_A = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_A = max_input_flow
_A = 0
_A = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_A = max_input_flow
_A = size - 1
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.maximum_flow_algorithm is None:
raise Exception('You need to set maximum flow algorithm before.' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Union[str, Any] ):
_A = algorithm(self )
class lowercase_ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Union[str, Any] ):
_A = flow_network
_A = flow_network.verticesCount
_A = flow_network.sourceIndex
_A = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_A = flow_network.graph
_A = False
def lowerCAmelCase_ ( self : Optional[Any] ):
if not self.executed:
self._algorithm()
_A = True
def lowerCAmelCase_ ( self : int ):
pass
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , _UpperCAmelCase : Any ):
super().__init__(_UpperCAmelCase )
# use this to save your result
_A = -1
def lowerCAmelCase_ ( self : Optional[Any] ):
if not self.executed:
raise Exception('You should execute algorithm before using its result!' )
return self.maximum_flow
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , _UpperCAmelCase : List[Any] ):
super().__init__(_UpperCAmelCase )
_A = [[0] * self.verticies_count for i in range(self.verticies_count )]
_A = [0] * self.verticies_count
_A = [0] * self.verticies_count
def lowerCAmelCase_ ( self : Dict ):
_A = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_A = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_A = 0
while i < len(_UpperCAmelCase ):
_A = vertices_list[i]
_A = self.heights[vertex_index]
self.process_vertex(_UpperCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(_UpperCAmelCase ) )
_A = 0
else:
i += 1
_A = sum(self.preflow[self.source_index] )
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Any ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(_UpperCAmelCase , _UpperCAmelCase )
self.relabel(_UpperCAmelCase )
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple ):
_A = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : int ):
_A = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_A = self.heights[to_index]
if min_height is not None:
_A = min_height + 1
if __name__ == "__main__":
a = [0]
a = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
a = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
a = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
a = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
| 7 | 1 |
'''Test that `hf_hub_url` builds dataset resolve URLs correctly.'''
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


# NOTE(review): the scrambled source declared all three parameters as
# `snake_case__` (a duplicate-argument SyntaxError) and named the function
# `_A`, which pytest would not collect; restored so the parameter names
# match the parametrize ids.
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def test_hf_hub_url(repo_id, path, revision):
    """URL must be https://huggingface.co/datasets/<repo_id>/resolve/<revision or main>/<url-quoted path>."""
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path )}'''
| 694 |
'''Project Euler problem 2: sum of the even Fibonacci numbers up to a limit.'''


def solution(n: int = 4_00_00_00) -> int:
    """Return the sum of all even Fibonacci numbers that do not exceed ``n``.

    NOTE(review): the scrambled source collapsed the tuple targets
    (``a, b``) and the function name; ``solution``/``n`` are restored from
    the ``solution()`` call and the ``b <= n`` read that survived.
    """
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(F'''{solution() = }''')
| 694 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.