"""Fast tokenization classes for DistilBERT, backed by HuggingFace's `tokenizers` library."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-german-cased": (
            "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
        ),
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "distilbert-base-uncased": {"do_lower_case": True},
    "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
    "distilbert-base-cased": {"do_lower_case": False},
    "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
    "distilbert-base-german-cased": {"do_lower_case": False},
    "distilbert-base-multilingual-cased": {"do_lower_case": False},
}


class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the backend normalizer disagrees with the requested options, rebuild it.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # Single sequence: [CLS] A [SEP]; pair of sequences: [CLS] A [SEP] B [SEP]
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
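
A brief usage sketch (not part of the original module; it assumes network access to the public distilbert-base-uncased checkpoint) showing how the two helpers above lay out a sequence pair:

from transformers import DistilBertTokenizerFast

tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
ids_a = tok.encode("hello world", add_special_tokens=False)
ids_b = tok.encode("how are you", add_special_tokens=False)
print(tok.build_inputs_with_special_tokens(ids_a, ids_b))      # [CLS] A [SEP] B [SEP]
print(tok.create_token_type_ids_from_sequences(ids_a, ids_b))  # 0s over [CLS] A [SEP], 1s over B [SEP]
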
"""Convert BLIP-2 checkpoints from the original LAVIS repository to the Transformers format."""
import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image

from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


def create_rename_keys(config):
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))
    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size


@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original model's weights into the Transformers design."""
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
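
For reference, a typical invocation of the conversion script above (the script filename and output directory are illustrative, not taken from the file itself):

python convert_blip_2_original_to_pytorch.py \
    --model_name blip2-opt-2.7b \
    --pytorch_dump_folder_path ./converted/blip2-opt-2.7b
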
"""A* and bidirectional A* search on a 2-D grid."""
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # each direction aims at the other's current frontier node
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
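
A small sketch (editor's illustration, not in the original file) of switching to the Manhattan heuristic: `Node.calculate_heuristic` reads the module-level flag at call time, so flipping it before constructing any nodes changes the distance measure.

HEURISTIC = 1  # Manhattan |dx| + |dy| instead of Euclidean sqrt(dx**2 + dy**2)
manhattan_path = AStar((0, 0), (6, 6)).search()
print(manhattan_path)
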
"""Utility that checks (or fixes) the ordering of objects in the custom `_import_structure` inits of Diffusers."""
import argparse
import os
import re


PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """
    Split `code` into its indented blocks, starting at `indent_level`. If provided, begins splitting after
    `start_prompt` and stops at `end_prompt` (returning what is before/after those markers as the first/last block).
    """
    # Let's split the code into lines and move to start_index.
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wraps a `key` function to lower-case its result and ignore underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort; `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with the imported objects properly sorted."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; with `check_only=True`, only report whether a rewrite is needed."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
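
A typical way to run the checker (assuming the script lives under utils/ and is run from the repository root, as the hard-coded "src/diffusers" path implies):

python utils/custom_init_isort.py --check_only   # only report unsorted inits
python utils/custom_init_isort.py                # rewrite them in place
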
"""Bead sort (also known as gravity sort); only valid for sequences of non-negative integers."""


def bead_sort(sequence: list) -> list:
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                # let the surplus "beads" fall from the upper rod to the lower one
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
"""Tokenization classes for RAG."""
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
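
A minimal usage sketch (not in the original file; it assumes the public facebook/rag-token-nq checkpoint is reachable): `__call__` delegates to whichever tokenizer is current, which starts out as the question encoder.

from transformers import RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
print(inputs["input_ids"].shape)  # encoded with the question-encoder tokenizer
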
"""Tests for the CPM-Ant tokenizer."""
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens

        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
"""Project Euler problem 112 (https://projecteuler.net/problem=112): least number for which the proportion of bouncy numbers reaches the given percentage."""


def check_bouncy(n: int) -> bool:
    """Returns True if the number is bouncy, i.e. its digits are neither sorted ascending nor descending."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Returns the least number for which the proportion of bouncy numbers first reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")

    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
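
A quick check against the examples from the Project Euler 112 statement (illustrative, not part of the original file):

assert not check_bouncy(134468)  # digits never decrease
assert not check_bouncy(66420)   # digits never increase
assert check_bouncy(155349)      # neither, so the number is bouncy
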
"""Project Euler problem 6 (https://projecteuler.net/problem=6): difference between the square of the sum and the sum of the squares of the first n natural numbers."""


def solution(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
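
The closed forms used above are the standard identities

\[
\Bigl(\sum_{k=1}^{n} k\Bigr)^{2} = \Bigl(\frac{n(n+1)}{2}\Bigr)^{2},
\qquad
\sum_{k=1}^{n} k^{2} = \frac{n(n+1)(2n+1)}{6},
\]

so `solution(n)` returns their difference in constant time. For n = 10 this gives 3025 - 385 = 2640, matching the problem statement.
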
"""Tests for the ONNX Stable Diffusion x4 upscaling pipeline."""
import random
import unittest

import numpy as np
import torch

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionUpscalePipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
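
A short sketch of what the lazy init buys (illustrative, assuming a transformers install that ships this package): importing the package is cheap, and the torch-heavy modeling module is only materialized on first attribute access.

import transformers.models.focalnet as focalnet

config_cls = focalnet.FocalNetConfig  # resolves configuration_focalnet only
model_cls = focalnet.FocalNetModel    # first touch imports modeling_focalnet (and torch)
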
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
UpperCamelCase_ = random.Random()
def _UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : Tuple=1.0 , _lowerCamelCase : int=None , _lowerCamelCase : int=None ) -> str:
if rng is None:
_lowerCAmelCase : List[str] = global_rng
_lowerCAmelCase : Any = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class a_ (unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=4_0_0 , snake_case_=2_0_0_0 , snake_case_=2_4 , snake_case_=2_4 , snake_case_=0.0 , snake_case_=1_6_0_0_0 , snake_case_=True , snake_case_=True , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Optional[Any] = batch_size
_lowerCAmelCase : str = min_seq_length
_lowerCAmelCase : str = max_seq_length
_lowerCAmelCase : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCAmelCase : Tuple = feature_size
_lowerCAmelCase : Tuple = num_mel_bins
_lowerCAmelCase : Tuple = padding_value
_lowerCAmelCase : Union[str, Any] = sampling_rate
_lowerCAmelCase : Optional[int] = return_attention_mask
_lowerCAmelCase : Union[str, Any] = do_normalize
def __UpperCamelCase ( self ):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCamelCase ( self , snake_case_=False , snake_case_=False ):
def _flatten(snake_case_ ):
return list(itertools.chain(*snake_case_ ) )
if equal_length:
_lowerCAmelCase : Tuple = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_lowerCAmelCase : List[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCAmelCase : List[str] = [np.asarray(snake_case_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class a_ (_a , unittest.TestCase ):
__lowerCAmelCase : str = SpeechaTextFeatureExtractor if is_speech_available() else None
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = SpeechaTextFeatureExtractionTester(self )
def __UpperCamelCase ( self , snake_case_ ):
self.assertTrue(np.all(np.mean(snake_case_ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(snake_case_ , axis=0 ) - 1 ) < 1E-3 ) )
def __UpperCamelCase ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCAmelCase : List[str] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCAmelCase : Optional[Any] = [np.asarray(snake_case_ ) for speech_input in speech_inputs]
# Test feature size
_lowerCAmelCase : Union[str, Any] = feature_extractor(snake_case_ , padding=snake_case_ , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
_lowerCAmelCase : Union[str, Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
_lowerCAmelCase : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
# Test batched
_lowerCAmelCase : str = feature_extractor(snake_case_ , return_tensors="""np""" ).input_features
_lowerCAmelCase : Any = feature_extractor(snake_case_ , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case_ , snake_case_ ):
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCAmelCase : Dict = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_lowerCAmelCase : Optional[int] = np.asarray(snake_case_ )
_lowerCAmelCase : Any = feature_extractor(snake_case_ , return_tensors="""np""" ).input_features
_lowerCAmelCase : Union[str, Any] = feature_extractor(snake_case_ , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case_ , snake_case_ ):
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : List[str] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCAmelCase : Tuple = ["""longest""", """max_length""", """do_not_pad"""]
_lowerCAmelCase : List[Any] = [None, 1_6, None]
for max_length, padding in zip(snake_case_ , snake_case_ ):
_lowerCAmelCase : List[str] = feature_extractor(
snake_case_ , padding=snake_case_ , max_length=snake_case_ , return_attention_mask=snake_case_ )
_lowerCAmelCase : Dict = inputs.input_features
_lowerCAmelCase : Any = inputs.attention_mask
_lowerCAmelCase : List[Any] = [np.sum(snake_case_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : str = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCAmelCase : Dict = ["""longest""", """max_length""", """do_not_pad"""]
_lowerCAmelCase : int = [None, 1_6, None]
for max_length, padding in zip(snake_case_ , snake_case_ ):
_lowerCAmelCase : int = feature_extractor(
snake_case_ , max_length=snake_case_ , padding=snake_case_ , return_tensors="""np""" , return_attention_mask=snake_case_ )
_lowerCAmelCase : List[Any] = inputs.input_features
_lowerCAmelCase : Optional[int] = inputs.attention_mask
_lowerCAmelCase : Optional[Any] = [np.sum(snake_case_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : str = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCAmelCase : int = feature_extractor(
snake_case_ , padding="""max_length""" , max_length=4 , truncation=snake_case_ , return_tensors="""np""" , return_attention_mask=snake_case_ , )
_lowerCAmelCase : str = inputs.input_features
_lowerCAmelCase : Any = inputs.attention_mask
_lowerCAmelCase : Any = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : List[Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCAmelCase : Union[str, Any] = feature_extractor(
snake_case_ , padding="""longest""" , max_length=4 , truncation=snake_case_ , return_tensors="""np""" , return_attention_mask=snake_case_ , )
_lowerCAmelCase : Dict = inputs.input_features
_lowerCAmelCase : List[str] = inputs.attention_mask
_lowerCAmelCase : int = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
        # truncation=True with max_length (4) < longest -> every sequence is truncated to max_length
self.assertEqual(input_features.shape , (3, 4, 2_4) )
_lowerCAmelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCAmelCase : Optional[Any] = feature_extractor(
snake_case_ , padding="""longest""" , max_length=1_6 , truncation=snake_case_ , return_tensors="""np""" , return_attention_mask=snake_case_ , )
_lowerCAmelCase : str = inputs.input_features
_lowerCAmelCase : Any = inputs.attention_mask
_lowerCAmelCase : Union[str, Any] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
        # padding="longest" pads to the longest sequence (6 frames); max_length (16) has no effect here
self.assertEqual(input_features.shape , (3, 6, 2_4) )
def __UpperCamelCase ( self ):
import torch
_lowerCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : Any = np.random.rand(1_0_0 , 3_2 ).astype(np.floataa )
_lowerCAmelCase : Optional[int] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCAmelCase : Union[str, Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_lowerCAmelCase : int = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def __UpperCamelCase ( self , snake_case_ ):
from datasets import load_dataset
_lowerCAmelCase : Any = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_lowerCAmelCase : Tuple = ds.sort("""id""" ).select(range(snake_case_ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def __UpperCamelCase ( self ):
# fmt: off
_lowerCAmelCase : Dict = np.array([
-1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
-1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
-1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
] )
# fmt: on
_lowerCAmelCase : Union[str, Any] = self._load_datasamples(1 )
_lowerCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : Union[str, Any] = feature_extractor(snake_case_ , return_tensors="""pt""" ).input_features
        self.assertEqual(input_features.shape , (1, 5_8_4, 2_4) )
self.assertTrue(np.allclose(input_features[0, 0, :3_0] , snake_case_ , atol=1E-4 ) )
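
# A minimal, self-contained sketch (not part of the suite above) of the
# zero-mean / unit-variance invariant that `_check_zero_mean_unit_variance`
# asserts on extracted features. All names and the tolerance are illustrative.
import numpy as np

def check_zero_mean_unit_variance(features, tol=1e-3):
    # Per-dimension statistics over the time axis should be ~N(0, 1).
    mean_ok = np.all(np.abs(features.mean(axis=0)) < tol)
    var_ok = np.all(np.abs(features.var(axis=0) - 1) < tol)
    return bool(mean_ok and var_ok)

rng = np.random.default_rng(0)
feats = rng.normal(loc=3.0, scale=2.0, size=(584, 24))  # fake fbank frames
normalized = (feats - feats.mean(axis=0)) / np.sqrt(feats.var(axis=0) + 1e-7)
assert check_zero_mean_unit_variance(normalized)
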
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
UpperCamelCase_ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""DPTFeatureExtractor"""]
UpperCamelCase_ = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
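
# A minimal sketch of the lazy-import pattern the module above relies on:
# submodules are resolved only on first attribute access. `LazyModule` and
# the demo mapping are illustrative, not the transformers implementation.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        if symbol not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        submodule = importlib.import_module(self._symbol_to_module[symbol])
        return getattr(submodule, symbol)

lazy = LazyModule("demo", {"json": ["dumps"]})
assert lazy.dumps({"a": 1}) == '{"a": 1}'  # `json` is imported only on this first access
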
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase_ = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
'''simple docstring'''
from __future__ import annotations
import numpy as np
def _UpperCAmelCase ( _lowerCamelCase : list[float] ) -> np.ndarray:
    # Element-wise ReLU: max(0, x) for each entry of the input.
    return np.maximum(0 , _lowerCamelCase )

relu = _UpperCAmelCase  # expose the name the usage example below calls
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
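
# A small companion sketch (not part of the file above): the ReLU
# subgradient used in backpropagation; the value chosen at x == 0 is a
# convention, 0 here.
import numpy as np

def relu_prime(x: np.ndarray) -> np.ndarray:
    return (x > 0).astype(x.dtype)

print(relu_prime(np.array([-1.0, 0.0, 5.0])))  # --> [0. 0. 1.]
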
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class a_ (_a ):
__lowerCAmelCase : Any = """new-model"""
if is_tf_available():
class a_ (_a ):
__lowerCAmelCase : str = NewModelConfig
@require_tf
class a_ (unittest.TestCase ):
@slow
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = """bert-base-cased"""
_lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = TFAutoModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
@slow
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = """bert-base-cased"""
_lowerCAmelCase : int = AutoConfig.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[Any] = TFAutoModelForPreTraining.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
@slow
def __UpperCamelCase ( self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = TFAutoModelForCausalLM.from_pretrained(snake_case_ )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = TFAutoModelForCausalLM.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
@slow
def __UpperCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
_lowerCAmelCase : Tuple = TFAutoModelWithLMHead.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
@slow
def __UpperCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : int = AutoConfig.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
_lowerCAmelCase : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(snake_case_ )
_lowerCAmelCase , _lowerCAmelCase : Tuple = TFAutoModelForMaskedLM.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
@slow
def __UpperCamelCase ( self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = TFAutoModelForSeqaSeqLM.from_pretrained(snake_case_ )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
@slow
def __UpperCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
@slow
def __UpperCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase : Dict = AutoConfig.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = TFAutoModelForQuestionAnswering.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
@slow
@require_tensorflow_probability
def __UpperCamelCase ( self ):
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
_lowerCAmelCase : Any = AutoConfig.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
_lowerCAmelCase : List[str] = TFAutoModelForTableQuestionAnswering.from_pretrained(snake_case_ )
_lowerCAmelCase , _lowerCAmelCase : List[str] = TFAutoModelForTableQuestionAnswering.from_pretrained(
snake_case_ , output_loading_info=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = TFAutoModelWithLMHead.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=snake_case_ ) , 1_4_4_1_0 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = TFAutoModelWithLMHead.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=snake_case_ ) , 1_4_4_1_0 )
def __UpperCamelCase ( self ):
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
_lowerCAmelCase : List[str] = TFAutoModel.from_pretrained("""sgugger/funnel-random-tiny""" )
self.assertIsInstance(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = copy.deepcopy(model.config )
_lowerCAmelCase : List[Any] = ["""FunnelBaseModel"""]
_lowerCAmelCase : List[str] = TFAutoModel.from_config(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(snake_case_ )
_lowerCAmelCase : Tuple = TFAutoModel.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
def __UpperCamelCase ( self ):
try:
AutoConfig.register("""new-model""" , snake_case_ )
_lowerCAmelCase : Union[str, Any] = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(snake_case_ ):
auto_class.register(snake_case_ , snake_case_ )
auto_class.register(snake_case_ , snake_case_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case_ ):
auto_class.register(snake_case_ , snake_case_ )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowerCAmelCase : Tuple = BertModelTester(self ).get_config()
_lowerCAmelCase : Dict = NewModelConfig(**tiny_config.to_dict() )
_lowerCAmelCase : Any = auto_class.from_config(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(snake_case_ )
_lowerCAmelCase : Optional[int] = auto_class.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def __UpperCamelCase ( self ):
with self.assertRaisesRegex(
snake_case_ , """bert-base is not a local folder and is not a valid model identifier""" ):
_lowerCAmelCase : Any = TFAutoModel.from_pretrained("""bert-base""" )
def __UpperCamelCase ( self ):
with self.assertRaisesRegex(
snake_case_ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
_lowerCAmelCase : Optional[int] = TFAutoModel.from_pretrained(snake_case_ , revision="""aaaaaa""" )
def __UpperCamelCase ( self ):
with self.assertRaisesRegex(
snake_case_ , """hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin""" , ):
_lowerCAmelCase : str = TFAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def __UpperCamelCase ( self ):
with self.assertRaisesRegex(snake_case_ , """Use `from_pt=True` to load this model""" ):
_lowerCAmelCase : Optional[int] = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
def __UpperCamelCase ( self ):
# Make sure we have cached the model.
_lowerCAmelCase : Any = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
_lowerCAmelCase : Any = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
_lowerCAmelCase : Optional[Any] = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
with RequestCounter() as counter:
_lowerCAmelCase : Optional[int] = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
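
# A condensed sketch of the registration flow exercised above: a custom
# config type is exposed through the Auto API. `ToyConfig` is illustrative;
# `AutoConfig.register` and `AutoConfig.for_model` are the assumed
# transformers entry points.
from transformers import AutoConfig, PretrainedConfig

class ToyConfig(PretrainedConfig):
    model_type = "toy-model"

    def __init__(self, hidden_size=32, **kwargs):
        self.hidden_size = hidden_size
        super().__init__(**kwargs)

AutoConfig.register("toy-model", ToyConfig)
config = AutoConfig.for_model("toy-model", hidden_size=64)
assert isinstance(config, ToyConfig) and config.hidden_size == 64
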
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class a_ (_a ):
def __init__( self , *snake_case_ , **snake_case_ ):
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" , snake_case_ , )
super().__init__(*snake_case_ , **snake_case_ )
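
# A generic sketch of the deprecation pattern above: the old class is a thin
# alias that warns and defers to its replacement. All names here are made up
# for the demo.
import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated and will be removed; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

proc = OldProcessor(size=384)  # warns, then behaves exactly like NewProcessor
assert proc.size == 384
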
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
UpperCamelCase_ = random.Random()
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : int=1.0 , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : List[str]=None ) -> Any:
if rng is None:
_lowerCAmelCase : List[Any] = global_rng
_lowerCAmelCase : List[str] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class a_ (unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=4_0_0 , snake_case_=2_0_0_0 , snake_case_=1 , snake_case_=0.0 , snake_case_=1_6_0_0_0 , snake_case_=True , snake_case_=True , ):
_lowerCAmelCase : List[str] = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : Optional[int] = min_seq_length
_lowerCAmelCase : Optional[Any] = max_seq_length
_lowerCAmelCase : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCAmelCase : Optional[Any] = feature_size
_lowerCAmelCase : Optional[int] = padding_value
_lowerCAmelCase : List[Any] = sampling_rate
_lowerCAmelCase : Tuple = return_attention_mask
_lowerCAmelCase : Dict = do_normalize
def __UpperCamelCase ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCamelCase ( self , snake_case_=False , snake_case_=False ):
def _flatten(snake_case_ ):
return list(itertools.chain(*snake_case_ ) )
if equal_length:
_lowerCAmelCase : Optional[int] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_lowerCAmelCase : List[str] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCAmelCase : Any = [np.asarray(snake_case_ ) for x in speech_inputs]
return speech_inputs
class a_ (_a , unittest.TestCase ):
__lowerCAmelCase : List[Any] = WavaVecaFeatureExtractor
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = WavaVecaFeatureExtractionTester(self )
def __UpperCamelCase ( self , snake_case_ ):
self.assertTrue(np.all(np.mean(snake_case_ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(snake_case_ , axis=0 ) - 1 ) < 1E-3 ) )
def __UpperCamelCase ( self ):
        # Tests that all calls wrap to encode_plus and batch_encode_plus
_lowerCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCAmelCase : str = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCAmelCase : Dict = [np.asarray(snake_case_ ) for speech_input in speech_inputs]
# Test not batched input
_lowerCAmelCase : Dict = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
_lowerCAmelCase : Optional[Any] = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
# Test batched
_lowerCAmelCase : List[Any] = feat_extract(snake_case_ , return_tensors="""np""" ).input_values
_lowerCAmelCase : List[str] = feat_extract(snake_case_ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(snake_case_ , snake_case_ ):
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCAmelCase : Any = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_lowerCAmelCase : Any = np.asarray(snake_case_ )
_lowerCAmelCase : List[Any] = feat_extract(snake_case_ , return_tensors="""np""" ).input_values
_lowerCAmelCase : Tuple = feat_extract(snake_case_ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(snake_case_ , snake_case_ ):
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCAmelCase : Tuple = ["""longest""", """max_length""", """do_not_pad"""]
_lowerCAmelCase : Any = [None, 1_6_0_0, None]
for max_length, padding in zip(snake_case_ , snake_case_ ):
_lowerCAmelCase : Optional[Any] = feat_extract(snake_case_ , padding=snake_case_ , max_length=snake_case_ , return_tensors="""np""" )
_lowerCAmelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
            self.assertTrue(input_values[1][1_0_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : str = range(8_0_0 , 1_4_0_0 , 2_0_0 )
_lowerCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in lengths]
_lowerCAmelCase : str = ["""longest""", """max_length""", """do_not_pad"""]
_lowerCAmelCase : str = [None, 1_6_0_0, None]
for max_length, padding in zip(snake_case_ , snake_case_ ):
_lowerCAmelCase : List[str] = feat_extract(snake_case_ , max_length=snake_case_ , padding=snake_case_ )
_lowerCAmelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCAmelCase : List[Any] = feat_extract(
snake_case_ , truncation=snake_case_ , max_length=1_0_0_0 , padding="""max_length""" , return_tensors="""np""" )
_lowerCAmelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : Dict = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCAmelCase : Union[str, Any] = feat_extract(
snake_case_ , truncation=snake_case_ , max_length=1_0_0_0 , padding="""longest""" , return_tensors="""np""" )
_lowerCAmelCase : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
        # truncation=True with max_length (1000) < longest -> sequences are truncated to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
_lowerCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCAmelCase : Dict = feat_extract(
snake_case_ , truncation=snake_case_ , max_length=2_0_0_0 , padding="""longest""" , return_tensors="""np""" )
_lowerCAmelCase : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
@require_torch
def __UpperCamelCase ( self ):
import torch
_lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : List[Any] = np.random.rand(1_0_0 ).astype(np.floataa )
_lowerCAmelCase : Optional[int] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCAmelCase : Optional[Any] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
_lowerCAmelCase : Any = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def __UpperCamelCase ( self ):
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
_lowerCAmelCase : Union[str, Any] = WavaVecaConfig.from_pretrained(snake_case_ )
_lowerCAmelCase : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(snake_case_ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == """layer""" )
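
# An illustrative sketch of the per-utterance normalization these tests
# verify: statistics are computed over the valid samples only, and padded
# positions stay at the padding value. The epsilon is an assumption.
import numpy as np

def zero_mean_unit_var(x, length, eps=1e-7):
    valid = x[:length]
    out = np.zeros_like(x)  # padded tail keeps padding_value 0.0
    out[:length] = (valid - valid.mean()) / np.sqrt(valid.var() + eps)
    return out

raw = np.concatenate([np.random.rand(800), np.zeros(200)])  # 800 valid samples + padding
normalized = zero_mean_unit_var(raw, length=800)
assert abs(normalized[:800].mean()) < 1e-3
assert abs(normalized[:800].var() - 1) < 1e-3
assert normalized[800:].sum() < 1e-6
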
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class a_ (_a ):
__lowerCAmelCase : Dict = (DPMSolverSDEScheduler,)
__lowerCAmelCase : Dict = 1_0
def __UpperCamelCase ( self , **snake_case_ ):
_lowerCAmelCase : List[Any] = {
"""num_train_timesteps""": 1_1_0_0,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""noise_sampler_seed""": 0,
}
config.update(**snake_case_ )
return config
def __UpperCamelCase ( self ):
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def __UpperCamelCase ( self ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=snake_case_ , beta_end=snake_case_ )
def __UpperCamelCase ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=snake_case_ )
def __UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.scheduler_classes[0]
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : Any = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : Tuple = self.dummy_model()
_lowerCAmelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : Optional[Any] = sample.to(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Union[str, Any] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : Union[str, Any] = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = output.prev_sample
_lowerCAmelCase : List[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1E-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.scheduler_classes[0]
_lowerCAmelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
_lowerCAmelCase : Dict = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : int = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : int = sample.to(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : List[str] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : List[Any] = model(snake_case_ , snake_case_ )
_lowerCAmelCase : str = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : int = output.prev_sample
_lowerCAmelCase : str = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Optional[int] = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1E-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1E-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1E-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : str = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case_ )
_lowerCAmelCase : Tuple = self.dummy_model()
_lowerCAmelCase : Optional[int] = self.dummy_sample_deter.to(snake_case_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_lowerCAmelCase : str = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = output.prev_sample
_lowerCAmelCase : List[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1E-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = self.scheduler_classes[0]
_lowerCAmelCase : Optional[int] = self.get_scheduler_config()
_lowerCAmelCase : Tuple = scheduler_class(**snake_case_ , use_karras_sigmas=snake_case_ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case_ )
_lowerCAmelCase : List[Any] = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter.to(snake_case_ ) * scheduler.init_noise_sigma
_lowerCAmelCase : Optional[int] = sample.to(snake_case_ )
for t in scheduler.timesteps:
_lowerCAmelCase : List[str] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : int = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[int] = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : str = output.prev_sample
_lowerCAmelCase : Optional[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
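
# A condensed sketch of the sampling loop the tests above repeat: scale the
# sample, predict noise, and step the scheduler. The zero tensor stands in
# for a real UNet call; diffusers and torchsde are assumed to be installed.
import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample
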
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _UpperCAmelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str] ) -> str:
if isinstance(_lowerCamelCase , torch.Tensor ):
return image
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
_lowerCAmelCase : Union[str, Any] = [image]
if isinstance(image[0] , PIL.Image.Image ):
_lowerCAmelCase : List[str] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
_lowerCAmelCase : List[str] = np.concatenate(_lowerCamelCase , axis=0 )
_lowerCAmelCase : List[str] = np.array(_lowerCamelCase ).astype(np.floataa ) / 255.0
_lowerCAmelCase : Optional[Any] = image.transpose(0 , 3 , 1 , 2 )
_lowerCAmelCase : Any = 2.0 * image - 1.0
_lowerCAmelCase : Union[str, Any] = torch.from_numpy(_lowerCamelCase )
elif isinstance(image[0] , torch.Tensor ):
_lowerCAmelCase : Any = torch.cat(_lowerCamelCase , dim=0 )
return image
def _UpperCAmelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Tuple , _lowerCamelCase : Dict , _lowerCamelCase : str=0.9995 ) -> Optional[Any]:
if not isinstance(_lowerCamelCase , np.ndarray ):
_lowerCAmelCase : Union[str, Any] = True
_lowerCAmelCase : List[Any] = va.device
_lowerCAmelCase : Dict = va.cpu().numpy()
_lowerCAmelCase : Optional[Any] = va.cpu().numpy()
_lowerCAmelCase : Union[str, Any] = np.sum(va * va / (np.linalg.norm(_lowerCamelCase ) * np.linalg.norm(_lowerCamelCase )) )
if np.abs(_lowerCamelCase ) > DOT_THRESHOLD:
_lowerCAmelCase : Optional[int] = (1 - t) * va + t * va
else:
_lowerCAmelCase : Tuple = np.arccos(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = np.sin(_lowerCamelCase )
_lowerCAmelCase : str = theta_a * t
_lowerCAmelCase : Tuple = np.sin(_lowerCamelCase )
_lowerCAmelCase : int = np.sin(theta_a - theta_t ) / sin_theta_a
_lowerCAmelCase : List[Any] = sin_theta_t / sin_theta_a
_lowerCAmelCase : Optional[Any] = sa * va + sa * va
if inputs_are_torch:
_lowerCAmelCase : Tuple = torch.from_numpy(_lowerCamelCase ).to(_lowerCamelCase )
return va
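
# A compact numeric check of the spherical interpolation above, using the
# closed form slerp(t) = sin((1-t)*theta)/sin(theta) * v0 + sin(t*theta)/sin(theta) * v1.
# Names are local to this sketch; call `_slerp_demo()` ad hoc to verify.
def _slerp_demo():
    import numpy as np

    v0 = np.array([1.0, 0.0])
    v1 = np.array([0.0, 1.0])
    theta = np.arccos(np.clip(np.dot(v0, v1), -1.0, 1.0))
    t = 0.5
    v = (np.sin((1 - t) * theta) * v0 + np.sin(t * theta) * v1) / np.sin(theta)
    assert np.isclose(np.linalg.norm(v), 1.0)  # stays on the unit circle
    return v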
def _UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : List[str] ) -> Dict:
_lowerCAmelCase : List[str] = F.normalize(_lowerCamelCase , dim=-1 )
_lowerCAmelCase : Dict = F.normalize(_lowerCamelCase , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any] ) -> Optional[Any]:
for param in model.parameters():
_lowerCAmelCase : Tuple = value
class a_ (_a ):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_=None , ):
super().__init__()
self.register_modules(
vae=snake_case_ , text_encoder=snake_case_ , clip_model=snake_case_ , tokenizer=snake_case_ , unet=snake_case_ , scheduler=snake_case_ , feature_extractor=snake_case_ , coca_model=snake_case_ , coca_tokenizer=snake_case_ , coca_transform=snake_case_ , )
_lowerCAmelCase : str = (
feature_extractor.size
if isinstance(feature_extractor.size , snake_case_ )
else feature_extractor.size["""shortest_edge"""]
)
_lowerCAmelCase : Optional[Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , snake_case_ )
set_requires_grad(self.clip_model , snake_case_ )
def __UpperCamelCase ( self , snake_case_ = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case_ )
def __UpperCamelCase ( self ):
self.enable_attention_slicing(snake_case_ )
def __UpperCamelCase ( self ):
set_requires_grad(self.vae , snake_case_ )
def __UpperCamelCase ( self ):
set_requires_grad(self.vae , snake_case_ )
def __UpperCamelCase ( self ):
set_requires_grad(self.unet , snake_case_ )
def __UpperCamelCase ( self ):
set_requires_grad(self.unet , snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
# get the original timestep using init_timestep
_lowerCAmelCase : List[Any] = min(int(num_inference_steps * strength ) , snake_case_ )
_lowerCAmelCase : Dict = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase : Dict = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ):
if not isinstance(snake_case_ , torch.Tensor ):
raise ValueError(f'`image` has to be of type `torch.Tensor` but is {type(snake_case_ )}' )
_lowerCAmelCase : str = image.to(device=snake_case_ , dtype=snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
_lowerCAmelCase : Optional[Any] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case_ )
]
_lowerCAmelCase : Any = torch.cat(snake_case_ , dim=0 )
else:
_lowerCAmelCase : Dict = self.vae.encode(snake_case_ ).latent_dist.sample(snake_case_ )
        # Hardcode 0.18215 because stable-diffusion-2-base does not expose self.vae.config.scaling_factor
_lowerCAmelCase : Optional[int] = 0.1_8215 * init_latents
_lowerCAmelCase : Dict = init_latents.repeat_interleave(snake_case_ , dim=0 )
_lowerCAmelCase : List[Any] = randn_tensor(init_latents.shape , generator=snake_case_ , device=snake_case_ , dtype=snake_case_ )
# get latents
_lowerCAmelCase : Any = self.scheduler.add_noise(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : List[Any] = init_latents
return latents
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : int = self.coca_transform(snake_case_ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_lowerCAmelCase : Optional[int] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_lowerCAmelCase : Dict = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" , """""" ).rstrip(""" .,""" )
def __UpperCamelCase ( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : Optional[Any] = self.feature_extractor.preprocess(snake_case_ )
_lowerCAmelCase : Tuple = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half()
_lowerCAmelCase : Union[str, Any] = self.clip_model.get_image_features(snake_case_ )
_lowerCAmelCase : Optional[int] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=snake_case_ )
_lowerCAmelCase : List[Any] = image_embeddings_clip.repeat_interleave(snake_case_ , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_lowerCAmelCase : Union[str, Any] = latents.detach().requires_grad_()
_lowerCAmelCase : Optional[Any] = self.scheduler.scale_model_input(snake_case_ , snake_case_ )
# predict the noise residual
_lowerCAmelCase : List[str] = self.unet(snake_case_ , snake_case_ , encoder_hidden_states=snake_case_ ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_lowerCAmelCase : Dict = self.scheduler.alphas_cumprod[timestep]
_lowerCAmelCase : Optional[Any] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase : Tuple = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_lowerCAmelCase : str = torch.sqrt(snake_case_ )
_lowerCAmelCase : Tuple = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , snake_case_ ):
_lowerCAmelCase : Any = self.scheduler.sigmas[index]
_lowerCAmelCase : Tuple = latents - sigma * noise_pred
else:
raise ValueError(f'scheduler type {type(self.scheduler )} not supported' )
        # Hardcode 0.18215 because stable-diffusion-2-base does not expose self.vae.config.scaling_factor
_lowerCAmelCase : List[Any] = 1 / 0.1_8215 * sample
_lowerCAmelCase : List[Any] = self.vae.decode(snake_case_ ).sample
_lowerCAmelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase : Dict = transforms.Resize(self.feature_extractor_size )(snake_case_ )
_lowerCAmelCase : Any = self.normalize(snake_case_ ).to(latents.dtype )
_lowerCAmelCase : Optional[int] = self.clip_model.get_image_features(snake_case_ )
_lowerCAmelCase : List[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=snake_case_ )
_lowerCAmelCase : List[Any] = spherical_dist_loss(snake_case_ , snake_case_ ).mean() * clip_guidance_scale
_lowerCAmelCase : Any = -torch.autograd.grad(snake_case_ , snake_case_ )[0]
if isinstance(self.scheduler , snake_case_ ):
_lowerCAmelCase : int = latents.detach() + grads * (sigma**2)
_lowerCAmelCase : Dict = noise_pred_original
else:
_lowerCAmelCase : Tuple = noise_pred_original - torch.sqrt(snake_case_ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , snake_case_ , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = 5_1_2 , snake_case_ = 5_1_2 , snake_case_ = 0.6 , snake_case_ = 5_0 , snake_case_ = 7.5 , snake_case_ = 1 , snake_case_ = 0.0 , snake_case_ = 1_0_0 , snake_case_ = None , snake_case_ = "pil" , snake_case_ = True , snake_case_ = 0.8 , snake_case_ = 0.1 , snake_case_ = 0.1 , ):
if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) != batch_size:
raise ValueError(f'You have passed {batch_size} batch_size, but only {len(snake_case_ )} generators.' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if isinstance(snake_case_ , torch.Generator ) and batch_size > 1:
_lowerCAmelCase : List[Any] = [generator] + [None] * (batch_size - 1)
_lowerCAmelCase : Union[str, Any] = [
("""model""", self.coca_model is None),
("""tokenizer""", self.coca_tokenizer is None),
("""transform""", self.coca_transform is None),
]
_lowerCAmelCase : Optional[Any] = [x[0] for x in coca_is_none if x[1]]
_lowerCAmelCase : List[Any] = """, """.join(snake_case_ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(snake_case_ ):
raise ValueError(
f'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
f'Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
_lowerCAmelCase : Dict = self.get_image_description(snake_case_ )
if style_prompt is None:
if len(snake_case_ ):
raise ValueError(
f'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
f' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
_lowerCAmelCase : Optional[int] = self.get_image_description(snake_case_ )
# get prompt text embeddings for content and style
_lowerCAmelCase : Union[str, Any] = self.tokenizer(
snake_case_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=snake_case_ , return_tensors="""pt""" , )
_lowerCAmelCase : Optional[Any] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase : Any = self.tokenizer(
snake_case_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=snake_case_ , return_tensors="""pt""" , )
_lowerCAmelCase : Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase : List[str] = slerp(snake_case_ , snake_case_ , snake_case_ )
# duplicate text embeddings for each generation per prompt
_lowerCAmelCase : Optional[Any] = text_embeddings.repeat_interleave(snake_case_ , dim=0 )
# set timesteps
_lowerCAmelCase : List[Any] = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_lowerCAmelCase : int = {}
if accepts_offset:
_lowerCAmelCase : str = 1
self.scheduler.set_timesteps(snake_case_ , **snake_case_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_lowerCAmelCase , _lowerCAmelCase : List[str] = self.get_timesteps(snake_case_ , snake_case_ , self.device )
_lowerCAmelCase : Dict = timesteps[:1].repeat(snake_case_ )
# Preprocess image
_lowerCAmelCase : List[str] = preprocess(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[int] = self.prepare_latents(
snake_case_ , snake_case_ , snake_case_ , text_embeddings.dtype , self.device , snake_case_ )
_lowerCAmelCase : Optional[Any] = preprocess(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : str = self.prepare_latents(
snake_case_ , snake_case_ , snake_case_ , text_embeddings.dtype , self.device , snake_case_ )
_lowerCAmelCase : List[str] = slerp(snake_case_ , snake_case_ , snake_case_ )
if clip_guidance_scale > 0:
_lowerCAmelCase : Dict = self.get_clip_image_embeddings(snake_case_ , snake_case_ )
_lowerCAmelCase : int = self.get_clip_image_embeddings(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[int] = slerp(
snake_case_ , snake_case_ , snake_case_ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase : List[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase : int = content_text_input.input_ids.shape[-1]
_lowerCAmelCase : List[str] = self.tokenizer([""""""] , padding="""max_length""" , max_length=snake_case_ , return_tensors="""pt""" )
_lowerCAmelCase : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_lowerCAmelCase : List[str] = uncond_embeddings.repeat_interleave(snake_case_ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase : str = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase : Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase : Union[str, Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowerCAmelCase : Optional[Any] = torch.randn(snake_case_ , generator=snake_case_ , device="""cpu""" , dtype=snake_case_ ).to(
self.device )
else:
_lowerCAmelCase : int = torch.randn(snake_case_ , generator=snake_case_ , device=self.device , dtype=snake_case_ )
else:
if latents.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
_lowerCAmelCase : Union[str, Any] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase : Dict = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase : Union[str, Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase : Optional[Any] = {}
if accepts_eta:
_lowerCAmelCase : Optional[Any] = eta
# check if the scheduler accepts generator
_lowerCAmelCase : Optional[Any] = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_lowerCAmelCase : int = generator
with self.progress_bar(total=snake_case_ ):
for i, t in enumerate(snake_case_ ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : str = self.scheduler.scale_model_input(snake_case_ , snake_case_ )
# predict the noise residual
_lowerCAmelCase : Optional[Any] = self.unet(snake_case_ , snake_case_ , encoder_hidden_states=snake_case_ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase : List[Any] = noise_pred.chunk(2 )
_lowerCAmelCase : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_lowerCAmelCase : Any = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.cond_fn(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : Dict = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not expose self.vae.config.scaling_factor
_lowerCAmelCase : List[Any] = 1 / 0.1_8215 * latents
_lowerCAmelCase : List[str] = self.vae.decode(snake_case_ ).sample
_lowerCAmelCase : Dict = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCAmelCase : Dict = self.numpy_to_pil(snake_case_ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=snake_case_ , nsfw_content_detected=snake_case_ )
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"""vocab_file""": """vocab.txt"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
UpperCamelCase_ = {
"""YituTech/conv-bert-base""": 5_12,
"""YituTech/conv-bert-medium-small""": 5_12,
"""YituTech/conv-bert-small""": 5_12,
}
UpperCamelCase_ = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class a_ (_a ):
__lowerCAmelCase : Any = VOCAB_FILES_NAMES
__lowerCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Optional[int] = ConvBertTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_=True , snake_case_=None , **snake_case_ , ):
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
_lowerCAmelCase : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , snake_case_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , snake_case_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , snake_case_ ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(snake_case_ , normalizer_state.pop("""type""" ) )
_lowerCAmelCase : List[str] = do_lower_case
_lowerCAmelCase : str = strip_accents
_lowerCAmelCase : List[Any] = tokenize_chinese_chars
_lowerCAmelCase : List[Any] = normalizer_class(**snake_case_ )
_lowerCAmelCase : str = do_lower_case
def __UpperCamelCase ( self , snake_case_ , snake_case_=None ):
_lowerCAmelCase : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Optional[Any] = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Any = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
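
# An illustrative sketch of what the pair-building methods above compute:
# [CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair, and
# token_type_ids of 0 for the first segment and 1 for the second. The token
# IDs are made up for the demo.
CLS, SEP = 101, 102

def build_inputs(ids_a, ids_b=None):
    out = [CLS] + ids_a + [SEP]
    if ids_b is not None:
        out += ids_b + [SEP]
    return out

def segment_ids(ids_a, ids_b=None):
    first = [0] * (len(ids_a) + 2)  # [CLS] + A + [SEP]
    return first if ids_b is None else first + [1] * (len(ids_b) + 1)  # B + [SEP]

assert build_inputs([7, 8], [9]) == [CLS, 7, 8, SEP, 9, SEP]
assert segment_ids([7, 8], [9]) == [0, 0, 0, 0, 1, 1]
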
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str) -> None:
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)
    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)
    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)
    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)
    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)
    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]
        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape)
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape)
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape)
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape)
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape)
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape)
        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape)
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape)
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")
        # Intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")
        # Output
        bert_output: BertOutput = layer.output
        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")
    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")
    # LM Head
    lm_head = model.cls.predictions.transform
    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")
    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")
    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")
    # Export final model
    model.save_pretrained(pytorch_dump_path)
    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())
    print("Model conversion was done successfully!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
    args = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
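# Illustrative invocation (a sketch; the script filename and all paths are
# hypothetical placeholders):
#   python convert_token_dropping_checkpoint.py \
#       --tf_checkpoint_path ./token_dropping_ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./converted_model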
| 309
|
'''simple docstring'''
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            # scale the cumulative distribution to the gray-level range: (L - 1) * sum(p(r))
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)
    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])
    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 309
| 1
|
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None
logger = logging.get_logger(__name__)
UpperCamelCase_ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100, additional_special_tokens=None, **kwargs):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens")
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )
        return max_model_length
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens)))
    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
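# Illustrative usage sketch (not part of the original module; assumes a
# transformers version that ships T5TokenizerFast with the sentinel helpers
# defined above):
#
#   from transformers import T5TokenizerFast
#   tokenizer = T5TokenizerFast.from_pretrained("t5-small")
#   # with the default extra_ids=100, "<extra_id_0>" ... "<extra_id_99>"
#   # are appended to the vocabulary as sentinel tokens:
#   sentinels = tokenizer.get_sentinel_tokens()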
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
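# Illustrative effect of the lazy module above (a sketch, not part of the file):
# replacing sys.modules[__name__] with a _LazyModule means that
#
#   from transformers import GroupViTModel
#
# resolves the attribute lazily, so torch- or TF-dependent code is only
# imported when the corresponding class is first accessed.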
| 309
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, embedding_size=768, head_ratio=2, conv_kernel_size=9, num_groups=1, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ])
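# Illustrative usage sketch (not part of the original module): the config can
# be built directly with the ConvBERT-specific knobs defined above.
#
#   from transformers import ConvBertConfig
#   config = ConvBertConfig(conv_kernel_size=9, head_ratio=2, num_groups=1)
#   assert config.model_type == "convbert"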
| 309
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free spaces whereas 1s are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPosition = tuple[int, int]
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost
    def calculate_heuristic(self) -> float:
        dy = self.pos_x - self.goal_x
        dx = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dy) + abs(dx)
        else:
            return sqrt(dy**2 + dx**2)
    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False
    def search(self):
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]
    def get_successors(self, parent):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent))
        return successors
    def retrace_path(self, node):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start, goal):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False
    def search(self):
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]
    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")
    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 309
| 1
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)
    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)
    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self
    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}.")
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."
    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add model", use_temp_dir=True,
        )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add image processor", use_temp_dir=True,
        )
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"),
    }
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
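# Illustrative invocation (a sketch; the script filename is a hypothetical
# placeholder). Note that argparse's `type=bool` treats any non-empty string,
# including "False", as True, so --push_to_hub is best left at its default:
#   python convert_resnet_checkpoint.py \
#       --model_name resnet50 \
#       --pytorch_dump_folder_path ./resnet50-converted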
| 309
|
'''simple docstring'''
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answer" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"
    if (
        input("Do you want to enter your denominations? (y/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'Following is minimal change for {value}: ')
    answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 309
| 1
|
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results',
"""emoji""": True,
},
}
]
total_num_failed = 0
for log in Path().glob("""*.log"""):
    section_num_failed = 0
with open(log, """r""") as f:
for line in f:
            line = json.loads(line)
if line.get("""nodeid""", """""") != "":
UpperCamelCase_ = line["""nodeid"""]
if line.get("""duration""", None) is not None:
UpperCamelCase_ = F'{line["duration"]:.4f}'
if line.get("""outcome""", """""") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("""_""")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
    failed = []
log.unlink()
UpperCamelCase_ = """"""
UpperCamelCase_ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
        failed_table = []
        filesafailed = {}
for test in failed_tests:
UpperCamelCase_ = test[0].split("""::""")
UpperCamelCase_ = data[0].split("""/""")[-1]
if data[0] not in filesafailed:
                filesafailed[data[0]] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
        files = [test[0] for test in failed_table]
        individual_files = list(set(files))
# Count number of instances in failed_tests
        table = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
        failed_table = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
print(F'### {message}')
else:
UpperCamelCase_ = """No failed tests! 🤗"""
print(F'## {message}')
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
        md_report = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
        action_button = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
payload.append(action_button)
        date_report = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}',
}
],
}
payload.append(date_report)
        response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
        ts = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCamelCase_ = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
                    test_class = row[0]
else:
UpperCamelCase_ = """"""
UpperCamelCase_ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```',
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
)
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 1
|
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
def training_function(config, args):
# Initialize accelerator
_lowerCAmelCase : List[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCAmelCase : Optional[int] = config["""lr"""]
_lowerCAmelCase : Optional[Any] = int(config["""num_epochs"""] )
_lowerCAmelCase : List[str] = int(config["""seed"""] )
_lowerCAmelCase : Any = int(config["""batch_size"""] )
_lowerCAmelCase : Tuple = args.model_name_or_path
set_seed(_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Any = get_dataloaders(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCAmelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained(_lowerCamelCase , return_dict=_lowerCamelCase )
# Instantiate optimizer
_lowerCAmelCase : int = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_lowerCAmelCase : Optional[int] = optimizer_cls(params=model.parameters() , lr=_lowerCamelCase )
if accelerator.state.deepspeed_plugin is not None:
_lowerCAmelCase : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : Optional[Any] = (len(_lowerCamelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_lowerCAmelCase : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=_lowerCamelCase , num_warmup_steps=0 , num_training_steps=_lowerCamelCase , )
else:
_lowerCAmelCase : List[Any] = DummyScheduler(_lowerCamelCase , total_num_steps=_lowerCamelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# We need to keep track of how many total steps we have iterated over
_lowerCAmelCase : List[str] = 0
    # We also need to keep track of the starting epoch so files are named properly
_lowerCAmelCase : Optional[Any] = 0
# Now we train the model
_lowerCAmelCase : Optional[Any] = evaluate.load("""glue""" , """mrpc""" )
_lowerCAmelCase : str = 0
_lowerCAmelCase : int = {}
for epoch in range(_lowerCamelCase , _lowerCamelCase ):
model.train()
for step, batch in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Tuple = model(**_lowerCamelCase )
_lowerCAmelCase : Optional[int] = outputs.loss
_lowerCAmelCase : Dict = loss / gradient_accumulation_steps
accelerator.backward(_lowerCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
_lowerCAmelCase : Optional[int] = 0
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(**_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
_lowerCAmelCase , _lowerCAmelCase : Any = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_lowerCamelCase ) - 1:
_lowerCAmelCase : Optional[int] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_lowerCAmelCase : Optional[Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_lowerCamelCase , references=_lowerCamelCase , )
_lowerCAmelCase : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , _lowerCamelCase )
_lowerCAmelCase : List[str] = eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
_lowerCAmelCase : Tuple = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False,
    )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound", type=float, default=None, help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=3, help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
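# Illustrative launch command (a sketch; the script and config filenames are
# hypothetical placeholders):
#   accelerate launch --config_file deepspeed_config.yaml test_performance.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 --output_dir .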
| 309
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = """laion/clap-htsat-unfused"""
_lowerCAmelCase : int = tempfile.mkdtemp()
def __UpperCamelCase ( self , **snake_case_ ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self , **snake_case_ ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = self.get_feature_extractor()
_lowerCAmelCase : Union[str, Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase : int = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
_lowerCAmelCase : Dict = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_feature_extractor()
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Union[str, Any] = floats_list((3, 1_0_0_0) )
_lowerCAmelCase : List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
_lowerCAmelCase : Optional[Any] = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_feature_extractor()
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Tuple = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Union[str, Any] = """This is a test string"""
_lowerCAmelCase : Union[str, Any] = processor(text=snake_case_ )
_lowerCAmelCase : Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = self.get_feature_extractor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[Any] = processor.batch_decode(snake_case_ )
_lowerCAmelCase : Dict = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.get_feature_extractor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 309
| 1
|
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class a_ (_a ):
__lowerCAmelCase : Any = (DEISMultistepScheduler,)
__lowerCAmelCase : Tuple = (("""num_inference_steps""", 2_5),)
def __UpperCamelCase ( self , **snake_case_ ):
_lowerCAmelCase : Dict = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
}
config.update(**snake_case_ )
return config
def __UpperCamelCase ( self , snake_case_=0 , **snake_case_ ):
_lowerCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : Tuple = kwargs.pop("""num_inference_steps""" , snake_case_ )
_lowerCAmelCase : Any = self.dummy_sample
_lowerCAmelCase : Union[str, Any] = 0.1 * sample
_lowerCAmelCase : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Union[str, Any] = self.get_scheduler_config(**snake_case_ )
_lowerCAmelCase : int = scheduler_class(**snake_case_ )
scheduler.set_timesteps(snake_case_ )
# copy over dummy past residuals
_lowerCAmelCase : Any = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case_ )
_lowerCAmelCase : Any = scheduler_class.from_pretrained(snake_case_ )
new_scheduler.set_timesteps(snake_case_ )
# copy over dummy past residuals
_lowerCAmelCase : int = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = sample, sample
for t in range(snake_case_ , time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase : str = scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self , snake_case_=0 , **snake_case_ ):
_lowerCAmelCase : List[str] = dict(self.forward_default_kwargs )
_lowerCAmelCase : List[Any] = kwargs.pop("""num_inference_steps""" , snake_case_ )
_lowerCAmelCase : int = self.dummy_sample
_lowerCAmelCase : Dict = 0.1 * sample
_lowerCAmelCase : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Optional[Any] = self.get_scheduler_config()
_lowerCAmelCase : List[Any] = scheduler_class(**snake_case_ )
scheduler.set_timesteps(snake_case_ )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : Any = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case_ )
_lowerCAmelCase : Optional[int] = scheduler_class.from_pretrained(snake_case_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(snake_case_ )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase : List[str] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase : Union[str, Any] = scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
_lowerCAmelCase : int = new_scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __UpperCamelCase ( self , snake_case_=None , **snake_case_ ):
if scheduler is None:
_lowerCAmelCase : Tuple = self.scheduler_classes[0]
_lowerCAmelCase : List[Any] = self.get_scheduler_config(**snake_case_ )
_lowerCAmelCase : Optional[Any] = scheduler_class(**snake_case_ )
_lowerCAmelCase : Any = self.scheduler_classes[0]
_lowerCAmelCase : str = self.get_scheduler_config(**snake_case_ )
_lowerCAmelCase : Optional[Any] = scheduler_class(**snake_case_ )
_lowerCAmelCase : str = 1_0
_lowerCAmelCase : Union[str, Any] = self.dummy_model()
_lowerCAmelCase : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Tuple = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[Any] = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
return sample
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = dict(self.forward_default_kwargs )
_lowerCAmelCase : Union[str, Any] = kwargs.pop("""num_inference_steps""" , snake_case_ )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Optional[int] = self.get_scheduler_config()
_lowerCAmelCase : Tuple = scheduler_class(**snake_case_ )
_lowerCAmelCase : Dict = self.dummy_sample
_lowerCAmelCase : Optional[int] = 0.1 * sample
if num_inference_steps is not None and hasattr(snake_case_ , """set_timesteps""" ):
scheduler.set_timesteps(snake_case_ )
elif num_inference_steps is not None and not hasattr(snake_case_ , """set_timesteps""" ):
_lowerCAmelCase : Union[str, Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase : Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
_lowerCAmelCase : Any = dummy_past_residuals[: scheduler.config.solver_order]
_lowerCAmelCase : str = scheduler.timesteps[5]
_lowerCAmelCase : str = scheduler.timesteps[6]
_lowerCAmelCase : List[Any] = scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
_lowerCAmelCase : Optional[int] = scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __UpperCamelCase ( self ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_lowerCAmelCase : str = DEISMultistepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase : List[Any] = self.full_loop(scheduler=snake_case_ )
_lowerCAmelCase : Optional[int] = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1E-3
_lowerCAmelCase : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Optional[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : int = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : int = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : int = self.full_loop(scheduler=snake_case_ )
_lowerCAmelCase : int = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1E-3
def __UpperCamelCase ( self ):
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def __UpperCamelCase ( self ):
self.check_over_configs(thresholding=snake_case_ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=snake_case_ , prediction_type=snake_case_ , sample_max_value=snake_case_ , algorithm_type="""deis""" , solver_order=snake_case_ , solver_type=snake_case_ , )
def __UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case_ )
def __UpperCamelCase ( self ):
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=snake_case_ , solver_type=snake_case_ , prediction_type=snake_case_ , algorithm_type=snake_case_ , )
_lowerCAmelCase : Tuple = self.full_loop(
solver_order=snake_case_ , solver_type=snake_case_ , prediction_type=snake_case_ , algorithm_type=snake_case_ , )
assert not torch.isnan(snake_case_ ).any(), "Samples have nan numbers"
def __UpperCamelCase ( self ):
self.check_over_configs(lower_order_final=snake_case_ )
self.check_over_configs(lower_order_final=snake_case_ )
def __UpperCamelCase ( self ):
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=snake_case_ , time_step=0 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = self.full_loop()
_lowerCAmelCase : List[Any] = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = self.full_loop(prediction_type="""v_prediction""" )
_lowerCAmelCase : Optional[int] = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_mean.item() - 0.091 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.scheduler_classes[0]
_lowerCAmelCase : Any = self.get_scheduler_config(thresholding=snake_case_ , dynamic_thresholding_ratio=0 )
_lowerCAmelCase : Any = scheduler_class(**snake_case_ )
_lowerCAmelCase : Tuple = 1_0
_lowerCAmelCase : int = self.dummy_model()
_lowerCAmelCase : List[Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Optional[Any] = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
assert sample.dtype == torch.floataa
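
# A minimal usage sketch of the multistep contract the tests above exercise,
# assuming a diffusers-style DEISMultistepScheduler: set_timesteps() must run
# before step(), and step() consumes the denoiser's predicted residual. The
# constant residual below is a stand-in for a real model(sample, t) call.
import torch
from diffusers import DEISMultistepScheduler

def _demo_deis_loop():
    scheduler = DEISMultistepScheduler(num_train_timesteps=1_000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)  # stand-in initial noise
    for t in scheduler.timesteps:
        residual = 0.1 * sample  # stand-in for model(sample, t)
        sample = scheduler.step(residual, t, sample).prev_sample
    return sample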
| 309
|
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
UpperCamelCase_ = {
"""facebook/m2m100_418M""": 10_24,
}
# fmt: off
UpperCamelCase_ = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class a_ (_a ):
__lowerCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = ["""input_ids""", """attention_mask"""]
__lowerCAmelCase : List[int] = []
__lowerCAmelCase : List[int] = []
def __init__( self , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<pad>" , snake_case_="<unk>" , snake_case_="m2m100" , snake_case_ = None , snake_case_=8 , **snake_case_ , ):
_lowerCAmelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCAmelCase : Optional[Any] = language_codes
_lowerCAmelCase : Tuple = FAIRSEQ_LANGUAGE_CODES[language_codes]
_lowerCAmelCase : str = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
_lowerCAmelCase : int = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(snake_case_ )
for lang_code in fairseq_language_code
if self.get_lang_token(snake_case_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=snake_case_ , tgt_lang=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , language_codes=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Optional[int] = vocab_file
_lowerCAmelCase : Any = load_json(snake_case_ )
_lowerCAmelCase : str = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Union[str, Any] = spm_file
_lowerCAmelCase : Tuple = load_spm(snake_case_ , self.sp_model_kwargs )
_lowerCAmelCase : int = len(self.encoder )
_lowerCAmelCase : Union[str, Any] = {
self.get_lang_token(snake_case_ ): self.encoder_size + i for i, lang_code in enumerate(snake_case_ )
}
_lowerCAmelCase : List[str] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(snake_case_ )}
_lowerCAmelCase : Optional[Any] = {v: k for k, v in self.lang_token_to_id.items()}
_lowerCAmelCase : Any = src_lang if src_lang is not None else """en"""
_lowerCAmelCase : Optional[int] = tgt_lang
_lowerCAmelCase : Tuple = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_lowerCAmelCase : List[Any] = num_madeup_words
@property
def __UpperCamelCase ( self ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __UpperCamelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase ( self , snake_case_ ):
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(snake_case_ , self.encoder[self.unk_token] )
def __UpperCamelCase ( self , snake_case_ ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(snake_case_ , self.unk_token )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = []
_lowerCAmelCase : Optional[int] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case_ ) + token
_lowerCAmelCase : Optional[Any] = []
else:
current_sub_tokens.append(snake_case_ )
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
_lowerCAmelCase : List[Any] = [1] * len(self.prefix_tokens )
_lowerCAmelCase : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case_ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case_ )) + ([0] * len(snake_case_ )) + suffix_ones
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_lowerCAmelCase : int = self.__dict__.copy()
_lowerCAmelCase : str = None
return state
def __setstate__( self , snake_case_ ):
_lowerCAmelCase : List[str] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : str = {}
_lowerCAmelCase : str = load_spm(self.spm_file , self.sp_model_kwargs )
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Dict = Path(snake_case_ )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , snake_case_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , snake_case_ )
elif not os.path.isfile(self.spm_file ):
with open(snake_case_ , """wb""" ) as fi:
_lowerCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (str(snake_case_ ), str(snake_case_ ))
def __UpperCamelCase ( self , snake_case_ , snake_case_ = "en" , snake_case_ = None , snake_case_ = "ro" , **snake_case_ , ):
_lowerCAmelCase : Union[str, Any] = src_lang
_lowerCAmelCase : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(snake_case_ , snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_lowerCAmelCase : Dict = src_lang
_lowerCAmelCase : str = self(snake_case_ , add_special_tokens=snake_case_ , **snake_case_ )
_lowerCAmelCase : Union[str, Any] = self.get_lang_id(snake_case_ )
_lowerCAmelCase : Tuple = tgt_lang_id
return inputs
def __UpperCamelCase ( self ):
self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase ( self ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Optional[Any] = self.get_lang_token(snake_case_ )
_lowerCAmelCase : List[Any] = self.lang_token_to_id[lang_token]
_lowerCAmelCase : Any = [self.cur_lang_id]
_lowerCAmelCase : Any = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = self.get_lang_token(snake_case_ )
_lowerCAmelCase : int = self.lang_token_to_id[lang_token]
_lowerCAmelCase : str = [self.cur_lang_id]
_lowerCAmelCase : str = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
return self.lang_code_to_token[lang]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[str] = self.get_lang_token(snake_case_ )
return self.lang_token_to_id[lang_token]
def _UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
_lowerCAmelCase : Optional[Any] = sentencepiece.SentencePieceProcessor(**_lowerCamelCase )
spm.Load(str(_lowerCamelCase ) )
return spm
def _UpperCAmelCase ( _lowerCamelCase : str ) -> Union[Dict, List]:
with open(_lowerCamelCase , """r""" ) as f:
return json.load(_lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : str ) -> None:
with open(_lowerCamelCase , """w""" ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase , indent=2 )
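
# A minimal usage sketch of the tokenizer above, assuming the published
# facebook/m2m100_418M checkpoint. Setting src_lang re-runs
# set_src_lang_special_tokens(), so inputs get the matching __xx__ prefix
# token, and get_lang_id() yields the id to force as the decoder BOS.
from transformers import M2M100Tokenizer

def _demo_m2m100_tokenizer():
    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
    tokenizer.src_lang = "en"
    batch = tokenizer("Hello world", return_tensors="pt")
    forced_bos = tokenizer.get_lang_id("fr")  # pass as forced_bos_token_id to generate()
    return batch, forced_bos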
| 309
| 1
|
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
UpperCamelCase_ = importlib.util.find_spec("""s3fs""") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
UpperCamelCase_ = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def _UpperCAmelCase ( _lowerCamelCase : str ) -> str:
if "://" in dataset_path:
_lowerCAmelCase : int = dataset_path.split("""://""" )[1]
return dataset_path
def _UpperCAmelCase ( _lowerCamelCase : fsspec.AbstractFileSystem ) -> bool:
if fs is not None and fs.protocol != "file":
return True
else:
return False
def _UpperCAmelCase ( _lowerCamelCase : fsspec.AbstractFileSystem , _lowerCamelCase : str , _lowerCamelCase : str ) -> int:
_lowerCAmelCase : List[str] = not is_remote_filesystem(_lowerCamelCase )
if is_local:
        # LocalFileSystem.mv does copy + rm; it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(_lowerCamelCase ) , fs._strip_protocol(_lowerCamelCase ) )
else:
fs.mv(_lowerCamelCase , _lowerCamelCase , recursive=_lowerCamelCase )
def _UpperCAmelCase ( ) -> None:
if hasattr(fsspec.asyn , """reset_lock""" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : Any = threading.Lock()
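
# A minimal sketch of the path handling above (the helper names are obfuscated
# to _UpperCAmelCase in this dump; upstream they are extract_path_from_uri and
# is_remote_filesystem). Protocol-prefixed paths lose their scheme, and only
# non-"file" filesystems are treated as remote:
#
#   assert "s3://bucket/data".split("://")[1] == "bucket/data"
#   import fsspec
#   fs = fsspec.filesystem("file")  # local -> directory moves use shutil.move, not fs.mv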
| 309
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _UpperCAmelCase ( _lowerCamelCase : Callable , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ) -> np.ndarray:
_lowerCAmelCase : Union[str, Any] = int(np.ceil((x_end - xa) / step_size ) )
_lowerCAmelCase : Tuple = np.zeros((n + 1,) )
_lowerCAmelCase : List[Any] = ya
_lowerCAmelCase : int = xa
for k in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = y[k] + step_size * ode_func(_lowerCamelCase , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
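
# Worked example for the solver above (obfuscated to _UpperCAmelCase here;
# upstream it is explicit_euler(ode_func, y0, x0, step_size, x_end)). For
# y' = y with y(0) = 1 the exact solution is e**x, so at x = 5 with h = 0.01
# Euler gives (1.01)**500 ~ 144.77 against e**5 ~ 148.41, converging as h -> 0:
#
#   y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 5.0)
#   print(y[-1])  # ~144.77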
| 309
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCamelCase_ = logging.get_logger(__name__)
class a_ (_a ):
__lowerCAmelCase : Tuple = ["""pixel_values"""]
def __init__( self , snake_case_ = True , snake_case_ = None , snake_case_ = PIL.Image.BICUBIC , snake_case_ = True , snake_case_ = None , snake_case_ = 1 / 2_5_5 , snake_case_ = True , snake_case_ = True , snake_case_ = None , snake_case_ = None , **snake_case_ , ):
super().__init__(**snake_case_ )
_lowerCAmelCase : Optional[int] = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
_lowerCAmelCase : Dict = get_size_dict(snake_case_ )
_lowerCAmelCase : Dict = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
_lowerCAmelCase : Union[str, Any] = get_size_dict(snake_case_ , param_name="""crop_size""" )
_lowerCAmelCase : Optional[Any] = do_resize
_lowerCAmelCase : Dict = size
_lowerCAmelCase : Tuple = resample
_lowerCAmelCase : List[str] = do_center_crop
_lowerCAmelCase : Tuple = crop_size
_lowerCAmelCase : Optional[int] = do_rescale
_lowerCAmelCase : str = rescale_factor
_lowerCAmelCase : Union[str, Any] = do_normalize
_lowerCAmelCase : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCAmelCase : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ = PIL.Image.BICUBIC , snake_case_ = None , **snake_case_ , ):
_lowerCAmelCase : Dict = get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
return resize(
snake_case_ , size=(size["""height"""], size["""width"""]) , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ):
_lowerCAmelCase : Any = get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(snake_case_ , size=(size["""height"""], size["""width"""]) , data_format=snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ):
return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ):
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_=None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = ChannelDimension.FIRST , **snake_case_ , ):
_lowerCAmelCase : Any = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : Tuple = resample if resample is not None else self.resample
_lowerCAmelCase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase : str = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : Any = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase : str = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase : List[str] = image_std if image_std is not None else self.image_std
_lowerCAmelCase : List[str] = size if size is not None else self.size
_lowerCAmelCase : Tuple = get_size_dict(snake_case_ )
_lowerCAmelCase : Tuple = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase : Optional[Any] = get_size_dict(snake_case_ , param_name="""crop_size""" )
_lowerCAmelCase : Any = make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_lowerCAmelCase : Optional[Any] = [to_numpy_array(snake_case_ ) for image in images]
if do_resize:
_lowerCAmelCase : int = [self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ ) for image in images]
if do_center_crop:
_lowerCAmelCase : Dict = [self.center_crop(image=snake_case_ , size=snake_case_ ) for image in images]
if do_rescale:
_lowerCAmelCase : Dict = [self.rescale(image=snake_case_ , scale=snake_case_ ) for image in images]
if do_normalize:
_lowerCAmelCase : Optional[int] = [self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ ) for image in images]
_lowerCAmelCase : Tuple = [to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
_lowerCAmelCase : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
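
# A minimal usage sketch of the processor above (its class name is obfuscated
# to a_ in this dump). BaseImageProcessor.__call__ routes to preprocess(), so
# with the defaults the pipeline is resize(256) -> center_crop(224) ->
# rescale(1/255) -> normalize(IMAGENET mean/std):
#
#   import numpy as np
#   from PIL import Image
#
#   image = Image.fromarray(np.zeros((300, 300, 3), dtype=np.uint8))
#   batch = a_()(images=image, return_tensors="pt")
#   print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])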
| 309
|
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def _UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] ) -> Union[str, Any]:
# ===== initialization =====
_lowerCAmelCase : Tuple = Mock()
_lowerCAmelCase : Any = conn, Mock()
_lowerCAmelCase : Optional[Any] = iter([1, None] )
_lowerCAmelCase : str = lambda _lowerCamelCase : next(_lowerCamelCase )
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=_lowerCamelCase )
    # ===== verification =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
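
# Note on the stacked decorators above: unittest.mock applies @patch bottom-up,
# so the decorator nearest the function ("builtins.open") supplies the first
# mock argument and "socket.socket" the second. A minimal sketch:
#
#   from unittest.mock import patch
#
#   @patch("module.b")  # outermost decorator -> second argument
#   @patch("module.a")  # innermost decorator -> first argument
#   def test(mock_a, mock_b):
#       ...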
| 309
| 1
|
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
UpperCamelCase_ = logging.get_logger(__name__)
class a_ :
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[str] = question_encoder
_lowerCAmelCase : Optional[Any] = generator
_lowerCAmelCase : Optional[Any] = self.question_encoder
def __UpperCamelCase ( self , snake_case_ ):
if os.path.isfile(snake_case_ ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
_lowerCAmelCase : Any = os.path.join(snake_case_ , """question_encoder_tokenizer""" )
_lowerCAmelCase : Tuple = os.path.join(snake_case_ , """generator_tokenizer""" )
self.question_encoder.save_pretrained(snake_case_ )
self.generator.save_pretrained(snake_case_ )
@classmethod
def __UpperCamelCase ( cls , snake_case_ , **snake_case_ ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
_lowerCAmelCase : Dict = kwargs.pop("""config""" , snake_case_ )
if config is None:
_lowerCAmelCase : List[Any] = RagConfig.from_pretrained(snake_case_ )
_lowerCAmelCase : int = AutoTokenizer.from_pretrained(
snake_case_ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
_lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
snake_case_ , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=snake_case_ , generator=snake_case_ )
def __call__( self , *snake_case_ , **snake_case_ ):
return self.current_tokenizer(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
return self.generator.batch_decode(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
return self.generator.decode(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.question_encoder
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = self.generator
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = "longest" , snake_case_ = None , snake_case_ = True , **snake_case_ , ):
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , snake_case_ , )
if max_length is None:
_lowerCAmelCase : Any = self.current_tokenizer.model_max_length
_lowerCAmelCase : List[Any] = self(
snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , max_length=snake_case_ , padding=snake_case_ , truncation=snake_case_ , **snake_case_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
_lowerCAmelCase : List[str] = self.current_tokenizer.model_max_length
_lowerCAmelCase : List[str] = self(
text_target=snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , padding=snake_case_ , max_length=snake_case_ , truncation=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Dict = labels["""input_ids"""]
return model_inputs
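
# A minimal usage sketch of the wrapper above, assuming the published
# facebook/rag-token-base checkpoint: __call__ dispatches to current_tokenizer
# (the question encoder by default), while batch_decode()/decode() always use
# the generator tokenizer.
from transformers import RagTokenizer

def _demo_rag_tokenizer():
    tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
    return tokenizer("who wrote hamlet?", return_tensors="pt")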
| 309
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a_ :
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=3_0 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1_0 , snake_case_=0.02 , snake_case_=None , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : Tuple = image_size
_lowerCAmelCase : int = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : str = is_training
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : str = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : Any = type_sequence_label_size
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : Optional[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : List[Any] = (image_size // patch_size) ** 2
_lowerCAmelCase : Dict = num_patches + 1
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[Any] = ViTMSNModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Tuple = self.type_sequence_label_size
_lowerCAmelCase : int = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[int] = model(snake_case_ , labels=snake_case_ )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase : int = 1
_lowerCAmelCase : List[str] = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[int] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = config_and_inputs
_lowerCAmelCase : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ (_a , _a , unittest.TestCase ):
__lowerCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__lowerCAmelCase : Optional[int] = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase : Dict = False
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : Any = False
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = ViTMSNModelTester(self )
_lowerCAmelCase : int = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(snake_case_ )
_lowerCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def __UpperCamelCase ( self ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[int] = ViTMSNModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def _UpperCAmelCase ( ) -> Tuple:
_lowerCAmelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a_ (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self ):
torch.manual_seed(2 )
_lowerCAmelCase : Dict = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(snake_case_ )
_lowerCAmelCase : Dict = self.default_image_processor
_lowerCAmelCase : Any = prepare_img()
_lowerCAmelCase : List[str] = image_processor(images=snake_case_ , return_tensors="""pt""" ).to(snake_case_ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Dict = model(**snake_case_ )
# verify the logits
_lowerCAmelCase : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case_ )
_lowerCAmelCase : Tuple = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) )
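
# A minimal inference sketch matching the integration test above, assuming the
# published facebook/vit-msn-small checkpoint; "cats.png" is a hypothetical
# local image path.
import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNForImageClassification

def _demo_vit_msn(image_path="cats.png"):
    processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
    model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")
    inputs = processor(images=Image.open(image_path), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return logits.argmax(-1)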
| 309
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a_ :
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=3_0 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1_0 , snake_case_=0.02 , snake_case_=None , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : Tuple = image_size
_lowerCAmelCase : int = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : str = is_training
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : str = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : Any = type_sequence_label_size
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : Optional[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : List[Any] = (image_size // patch_size) ** 2
_lowerCAmelCase : Dict = num_patches + 1
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[Any] = ViTMSNModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Tuple = self.type_sequence_label_size
_lowerCAmelCase : int = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[int] = model(snake_case_ , labels=snake_case_ )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase : int = 1
_lowerCAmelCase : List[str] = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[int] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = config_and_inputs
_lowerCAmelCase : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ (_a , _a , unittest.TestCase ):
__lowerCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__lowerCAmelCase : Optional[int] = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase : Dict = False
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : Any = False
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = ViTMSNModelTester(self )
_lowerCAmelCase : int = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(snake_case_ )
_lowerCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def __UpperCamelCase ( self ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[int] = ViTMSNModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def _UpperCAmelCase ( ) -> Tuple:
_lowerCAmelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a_ (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self ):
torch.manual_seed(2 )
_lowerCAmelCase : Dict = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(snake_case_ )
_lowerCAmelCase : Dict = self.default_image_processor
_lowerCAmelCase : Any = prepare_img()
_lowerCAmelCase : List[str] = image_processor(images=snake_case_ , return_tensors="""pt""" ).to(snake_case_ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Dict = model(**snake_case_ )
# verify the logits
_lowerCAmelCase : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case_ )
_lowerCAmelCase : Tuple = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) )
| 309
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class a_ (_a ):
__lowerCAmelCase : List[Any] = """microsoft/speecht5_tts"""
__lowerCAmelCase : List[Any] = (
"""This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
"""text to read (in English) and returns a waveform object containing the sound."""
)
__lowerCAmelCase : List[str] = """text_reader"""
__lowerCAmelCase : Optional[Any] = SpeechTaProcessor
__lowerCAmelCase : str = SpeechTaForTextToSpeech
__lowerCAmelCase : int = SpeechTaHifiGan
__lowerCAmelCase : int = ["""text"""]
__lowerCAmelCase : int = ["""audio"""]
def __UpperCamelCase ( self ):
if self.post_processor is None:
_lowerCAmelCase : int = """microsoft/speecht5_hifigan"""
super().setup()
def __UpperCamelCase ( self , snake_case_ , snake_case_=None ):
_lowerCAmelCase : Tuple = self.pre_processor(text=snake_case_ , return_tensors="""pt""" , truncation=snake_case_ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
_lowerCAmelCase : List[str] = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
_lowerCAmelCase : Any = torch.tensor(embeddings_dataset[7_3_0_5]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __UpperCamelCase ( self , snake_case_ ):
with torch.no_grad():
return self.model.generate_speech(**snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
with torch.no_grad():
return self.post_processor(snake_case_ ).cpu().detach()
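
# A minimal sketch of the pipeline the tool above wires together, using the
# published microsoft checkpoints directly; the zero vector is a stand-in for
# a real 512-dim speaker x-vector such as the CMU Arctic entry loaded in the
# encode step above.
import torch
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor

def _demo_speecht5():
    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
    vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
    inputs = processor(text="Hello!", return_tensors="pt")
    speaker_embeddings = torch.zeros(1, 512)  # stand-in x-vector
    return model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)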
| 309
| 1
|
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class a_ :
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=0.2 , snake_case_=0.2 ):
_lowerCAmelCase : List[str] = bp_numa
_lowerCAmelCase : Optional[int] = bp_numa
_lowerCAmelCase : int = bp_numa
_lowerCAmelCase : int = conva_get[:2]
_lowerCAmelCase : Any = conva_get[2]
_lowerCAmelCase : Dict = size_pa
_lowerCAmelCase : Optional[int] = rate_w
_lowerCAmelCase : List[str] = rate_t
_lowerCAmelCase : int = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
_lowerCAmelCase : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
_lowerCAmelCase : List[str] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
_lowerCAmelCase : List[str] = -2 * np.random.rand(self.conva[1] ) + 1
_lowerCAmelCase : Optional[Any] = -2 * np.random.rand(self.num_bpa ) + 1
_lowerCAmelCase : int = -2 * np.random.rand(self.num_bpa ) + 1
def __UpperCamelCase ( self , snake_case_ ):
# save model dict with pickle
_lowerCAmelCase : Dict = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(snake_case_ , """wb""" ) as f:
pickle.dump(snake_case_ , snake_case_ )
print(f'Model saved: {save_path}' )
@classmethod
def __UpperCamelCase ( cls , snake_case_ ):
# read saved model
with open(snake_case_ , """rb""" ) as f:
_lowerCAmelCase : Tuple = pickle.load(snake_case_ ) # noqa: S301
_lowerCAmelCase : List[str] = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
_lowerCAmelCase : int = model_dic.get("""size_pooling1""" )
_lowerCAmelCase : str = model_dic.get("""num_bp1""" )
_lowerCAmelCase : Union[str, Any] = model_dic.get("""num_bp2""" )
_lowerCAmelCase : List[Any] = model_dic.get("""num_bp3""" )
_lowerCAmelCase : Optional[int] = model_dic.get("""rate_weight""" )
_lowerCAmelCase : str = model_dic.get("""rate_thre""" )
# create model instance
_lowerCAmelCase : str = CNN(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# modify model parameter
_lowerCAmelCase : Union[str, Any] = model_dic.get("""w_conv1""" )
_lowerCAmelCase : str = model_dic.get("""wkj""" )
_lowerCAmelCase : Dict = model_dic.get("""vji""" )
_lowerCAmelCase : int = model_dic.get("""thre_conv1""" )
_lowerCAmelCase : Dict = model_dic.get("""thre_bp2""" )
_lowerCAmelCase : Dict = model_dic.get("""thre_bp3""" )
return conv_ins
def __UpperCamelCase ( self , snake_case_ ):
return 1 / (1 + np.exp(-1 * x ))
def __UpperCamelCase ( self , snake_case_ ):
return round(snake_case_ , 3 )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
# convolution process
_lowerCAmelCase : Optional[int] = convs[0]
_lowerCAmelCase : Union[str, Any] = convs[1]
_lowerCAmelCase : Optional[int] = np.shape(snake_case_ )[0]
# get the data slice of original image data, data_focus
_lowerCAmelCase : str = []
for i_focus in range(0 , size_data - size_conv + 1 , snake_case_ ):
for j_focus in range(0 , size_data - size_conv + 1 , snake_case_ ):
_lowerCAmelCase : Tuple = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(snake_case_ )
        # calculate the feature map for every kernel and save the maps as a list of matrices
_lowerCAmelCase : Dict = []
_lowerCAmelCase : List[Any] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(snake_case_ ):
_lowerCAmelCase : Dict = []
for i_focus in range(len(snake_case_ ) ):
_lowerCAmelCase : Any = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(snake_case_ ) )
_lowerCAmelCase : Optional[int] = np.asmatrix(snake_case_ ).reshape(
snake_case_ , snake_case_ )
data_featuremap.append(snake_case_ )
        # expanding the data slice to one dimension
_lowerCAmelCase : List[str] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(snake_case_ ) )
_lowerCAmelCase : int = np.asarray(snake_case_ )
return focus_list, data_featuremap
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_="average_pool" ):
# pooling process
_lowerCAmelCase : List[Any] = len(featuremaps[0] )
_lowerCAmelCase : List[Any] = int(size_map / size_pooling )
_lowerCAmelCase : Any = []
for i_map in range(len(snake_case_ ) ):
_lowerCAmelCase : Optional[int] = featuremaps[i_map]
_lowerCAmelCase : Dict = []
for i_focus in range(0 , snake_case_ , snake_case_ ):
for j_focus in range(0 , snake_case_ , snake_case_ ):
_lowerCAmelCase : Optional[Any] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(snake_case_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(snake_case_ ) )
_lowerCAmelCase : Union[str, Any] = np.asmatrix(snake_case_ ).reshape(snake_case_ , snake_case_ )
featuremap_pooled.append(snake_case_ )
return featuremap_pooled
def __UpperCamelCase ( self , snake_case_ ):
        # expanding three-dimensional data into a one-dimensional list
_lowerCAmelCase : Dict = []
for i in range(len(snake_case_ ) ):
_lowerCAmelCase : Dict = np.shape(data[i] )
_lowerCAmelCase : List[Any] = data[i].reshape(1 , shapes[0] * shapes[1] )
_lowerCAmelCase : List[str] = data_listed.getA().tolist()[0]
data_expanded.extend(snake_case_ )
_lowerCAmelCase : Optional[int] = np.asarray(snake_case_ )
return data_expanded
def __UpperCamelCase ( self , snake_case_ ):
        # expanding a matrix into a one-dimensional list
_lowerCAmelCase : Optional[Any] = np.asarray(snake_case_ )
_lowerCAmelCase : Optional[Any] = np.shape(snake_case_ )
_lowerCAmelCase : Any = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Tuple = 0
for i_map in range(snake_case_ ):
_lowerCAmelCase : List[Any] = np.ones((size_map, size_map) )
for i in range(0 , snake_case_ , snake_case_ ):
for j in range(0 , snake_case_ , snake_case_ ):
_lowerCAmelCase : Tuple = pd_pool[
i_pool
]
_lowerCAmelCase : Any = i_pool + 1
_lowerCAmelCase : List[str] = np.multiply(
snake_case_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(snake_case_ )
return pd_all
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=bool ):
        # model training
        print("""----------------------Start Training-------------------------""" )
        print(""" - - Shape: Train_Data """, np.shape(snake_case_ ) )
        print(""" - - Shape: Teach_Data """, np.shape(snake_case_ ) )
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : int = []
_lowerCAmelCase : Optional[Any] = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
_lowerCAmelCase : List[Any] = 0
print(f'-------------Learning Time {rp}--------------' )
for p in range(len(snake_case_ ) ):
# print('------------Learning Image: %d--------------'%p)
_lowerCAmelCase : List[Any] = np.asmatrix(datas_train[p] )
_lowerCAmelCase : str = np.asarray(datas_teach[p] )
_lowerCAmelCase , _lowerCAmelCase : int = self.convolute(
snake_case_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_lowerCAmelCase : Any = self.pooling(snake_case_ , self.size_poolinga )
_lowerCAmelCase : Optional[Any] = np.shape(snake_case_ )
_lowerCAmelCase : Union[str, Any] = self._expand(snake_case_ )
_lowerCAmelCase : str = data_bp_input
_lowerCAmelCase : Tuple = np.dot(snake_case_ , self.vji.T ) - self.thre_bpa
_lowerCAmelCase : Tuple = self.sig(snake_case_ )
_lowerCAmelCase : Tuple = np.dot(snake_case_ , self.wkj.T ) - self.thre_bpa
_lowerCAmelCase : Dict = self.sig(snake_case_ )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
_lowerCAmelCase : Optional[Any] = np.multiply(
(data_teach - bp_outa) , np.multiply(snake_case_ , (1 - bp_outa) ) )
_lowerCAmelCase : Union[str, Any] = np.multiply(
np.dot(snake_case_ , self.wkj ) , np.multiply(snake_case_ , (1 - bp_outa) ) )
_lowerCAmelCase : List[str] = np.dot(snake_case_ , self.vji )
_lowerCAmelCase : Tuple = pd_i_all / (self.size_poolinga * self.size_poolinga)
_lowerCAmelCase : int = pd_conva_pooled.T.getA().tolist()
_lowerCAmelCase : Optional[int] = self._calculate_gradient_from_pool(
snake_case_ , snake_case_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
_lowerCAmelCase : List[Any] = self._expand_mat(pd_conva_all[k_conv] )
_lowerCAmelCase : str = self.rate_weight * np.dot(snake_case_ , snake_case_ )
_lowerCAmelCase : int = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
_lowerCAmelCase : int = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
_lowerCAmelCase : str = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
_lowerCAmelCase : List[str] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
_lowerCAmelCase : Union[str, Any] = self.thre_bpa - pd_k_all * self.rate_thre
_lowerCAmelCase : int = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
_lowerCAmelCase : Union[str, Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
_lowerCAmelCase : Optional[Any] = rp + 1
_lowerCAmelCase : str = error_count / patterns
all_mse.append(snake_case_ )
def draw_error():
_lowerCAmelCase : Dict = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(snake_case_ , """+-""" )
plt.plot(snake_case_ , """r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(snake_case_ , alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, f' - - Mse: {mse:.6f}') )
if draw_e:
draw_error()
return mse
def __UpperCamelCase ( self , snake_case_ ):
        # model prediction
_lowerCAmelCase : List[str] = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(snake_case_ )) )
for p in range(len(snake_case_ ) ):
_lowerCAmelCase : Dict = np.asmatrix(datas_test[p] )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.convolute(
snake_case_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_lowerCAmelCase : Union[str, Any] = self.pooling(snake_case_ , self.size_poolinga )
_lowerCAmelCase : Tuple = self._expand(snake_case_ )
_lowerCAmelCase : Dict = data_bp_input
_lowerCAmelCase : Dict = bp_outa * self.vji.T - self.thre_bpa
_lowerCAmelCase : List[Any] = self.sig(snake_case_ )
_lowerCAmelCase : Dict = bp_outa * self.wkj.T - self.thre_bpa
_lowerCAmelCase : List[str] = self.sig(snake_case_ )
produce_out.extend(bp_outa.getA().tolist() )
_lowerCAmelCase : List[Any] = [list(map(self.do_round , snake_case_ ) ) for each in produce_out]
return np.asarray(snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
# return the image data after the convolution process so we can inspect it
_lowerCAmelCase : List[str] = np.asmatrix(snake_case_ )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.convolute(
snake_case_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_lowerCAmelCase : List[str] = self.pooling(snake_case_ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 309
|
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _UpperCAmelCase ( ) -> Tuple:
_lowerCAmelCase : List[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
_lowerCAmelCase : int = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert("""RGB""" )
return image
def _UpperCAmelCase ( _lowerCamelCase : Any ) -> Dict:
_lowerCAmelCase : str = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def _UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] ) -> Optional[Any]:
_lowerCAmelCase : str = dct.pop(_lowerCamelCase )
_lowerCAmelCase : str = val
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple ) -> Tuple:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_lowerCAmelCase : Tuple = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' )
_lowerCAmelCase : Optional[Any] = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set the fused qkv bias in the state dict (the k bias is implicitly zero, hence the zeros block below)
_lowerCAmelCase : int = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase , requires_grad=_lowerCamelCase ), v_bias) )
_lowerCAmelCase : str = qkv_bias
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] ) -> List[Any]:
_lowerCAmelCase : str = 3_64 if """coco""" in model_name else 2_24
_lowerCAmelCase : str = BlipaVisionConfig(image_size=_lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
_lowerCAmelCase : int = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=_lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
_lowerCAmelCase : Union[str, Any] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=_lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
_lowerCAmelCase : Optional[int] = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_lowerCAmelCase : str = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
_lowerCAmelCase : Dict = BlipaConfig(vision_config=_lowerCamelCase , text_config=_lowerCamelCase )
return config, image_size
@torch.no_grad()
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : List[Any]=None , _lowerCamelCase : int=False ) -> List[str]:
_lowerCAmelCase : int = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
_lowerCAmelCase : List[Any] = tokenizer("""\n""" , add_special_tokens=_lowerCamelCase ).input_ids[0]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = BlipaForConditionalGeneration(_lowerCamelCase ).eval()
_lowerCAmelCase : Union[str, Any] = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
_lowerCAmelCase , _lowerCAmelCase : List[str] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_lowerCAmelCase : Dict = """cuda""" if torch.cuda.is_available() else """cpu"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = load_model_and_preprocess(
name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
_lowerCAmelCase : List[Any] = original_model.state_dict()
_lowerCAmelCase : Optional[int] = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_lowerCAmelCase : Tuple = state_dict.pop(_lowerCamelCase )
if key.startswith("""Qformer.bert""" ):
_lowerCAmelCase : List[Any] = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
_lowerCAmelCase : Optional[int] = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
_lowerCAmelCase : Dict = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
_lowerCAmelCase : Tuple = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
_lowerCAmelCase : List[Any] = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
_lowerCAmelCase : int = key.replace("""t5""" , """language""" )
_lowerCAmelCase : Tuple = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert len(_lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_lowerCAmelCase : Union[str, Any] = load_demo_image()
_lowerCAmelCase : Optional[int] = vis_processors["""eval"""](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
_lowerCAmelCase : List[str] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
# create processor
_lowerCAmelCase : Optional[int] = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
_lowerCAmelCase : Tuple = BlipaProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
_lowerCAmelCase : Any = processor(images=_lowerCamelCase , return_tensors="""pt""" ).pixel_values.to(_lowerCamelCase )
# make sure the processor creates the exact same pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
_lowerCAmelCase : Optional[Any] = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
_lowerCAmelCase : Optional[Any] = hf_model(_lowerCamelCase , _lowerCamelCase ).logits
else:
_lowerCAmelCase : List[Any] = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
_lowerCAmelCase : Tuple = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
_lowerCAmelCase : Dict = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_lowerCAmelCase : Any = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_lowerCAmelCase : List[Any] = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase )
else:
# cast to same type
_lowerCAmelCase : Union[str, Any] = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_lowerCAmelCase : Optional[int] = """"""
_lowerCAmelCase : Union[str, Any] = tokenizer(_lowerCamelCase , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
_lowerCAmelCase : List[Any] = original_model.generate({"""image""": original_pixel_values} )
_lowerCAmelCase : Dict = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , _lowerCamelCase )
_lowerCAmelCase : int = input_ids.shape[1]
_lowerCAmelCase : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
_lowerCAmelCase : List[str] = [text.strip() for text in output_text]
print("""HF generation:""" , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'nielsr/{model_name}' )
hf_model.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
UpperCamelCase_ = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
UpperCamelCase_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 309
| 1
|
'''simple docstring'''
import os
from collections.abc import Iterator
def _UpperCAmelCase ( _lowerCamelCase : str = "." ) -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(_lowerCamelCase ):
_lowerCAmelCase : List[str] = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_lowerCamelCase )[1] in (".py", ".ipynb"):
yield os.path.join(_lowerCamelCase , _lowerCamelCase ).lstrip("""./""" )
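# Illustrative: walking "." yields repo-relative paths such as "sorts/bubble_sort.py",
# skipping the "scripts" directory, hidden/underscore-prefixed directories, and __init__.py files.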
def _UpperCAmelCase ( _lowerCamelCase : List[Any] ) -> Optional[Any]:
return f'{i * " "}*' if i else "\n##"
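# Illustrative sketch (names as used at the call sites below): md_prefix(0) returns
# "\n##" (a fresh markdown heading), while md_prefix(2) returns "  *" (an indented bullet).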
def _UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : str ) -> str:
_lowerCAmelCase : List[Any] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_lowerCamelCase ) or old_parts[i] != new_part) and new_part:
print(f'{md_prefix(_lowerCamelCase )} {new_part.replace("_" , " " ).title()}' )
return new_path
def _UpperCAmelCase ( _lowerCamelCase : str = "." ) -> None:
_lowerCAmelCase : Tuple = """"""
for filepath in sorted(good_file_paths(_lowerCamelCase ) ):
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = os.path.split(_lowerCamelCase )
if filepath != old_path:
_lowerCAmelCase : Any = print_path(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Any = (filepath.count(os.sep ) + 1) if filepath else 0
_lowerCAmelCase : Dict = f'{filepath}/{filename}'.replace(""" """ , """%20""" )
_lowerCAmelCase : Tuple = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(f'{md_prefix(_lowerCamelCase )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md(""".""")
| 309
|
'''simple docstring'''
import argparse
import os
import re
UpperCamelCase_ = """src/diffusers"""
# Pattern that looks at the indentation in a line.
UpperCamelCase_ = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCamelCase_ = re.compile(r"""\[([^\]]+)\]""")
def _UpperCAmelCase ( _lowerCamelCase : List[Any] ) -> str:
_lowerCAmelCase : Dict = _re_indent.search(_lowerCamelCase )
return "" if search is None else search.groups()[0]
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str]="" , _lowerCamelCase : str=None , _lowerCamelCase : List[Any]=None ) -> str:
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Tuple = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(_lowerCamelCase ):
index += 1
_lowerCAmelCase : List[Any] = ["""\n""".join(lines[:index] )]
else:
_lowerCAmelCase : List[str] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase : Union[str, Any] = [lines[index]]
index += 1
while index < len(_lowerCamelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_lowerCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(_lowerCamelCase ) )
if index < len(_lowerCamelCase ) - 1:
_lowerCAmelCase : Union[str, Any] = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : Dict = []
else:
blocks.append("""\n""".join(_lowerCamelCase ) )
_lowerCAmelCase : Tuple = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_lowerCamelCase ) > 0:
blocks.append("""\n""".join(_lowerCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCamelCase ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] ) -> Any:
def _inner(_lowerCamelCase : Any ):
return key(_lowerCamelCase ).lower().replace("""_""" , """""" )
return _inner
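# Illustrative: with key=str, the wrapped key maps "_LazyModule" to "lazymodule",
# so leading underscores and letter case never influence the sort order.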
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple=None ) -> Union[str, Any]:
# If no key is provided, we use a noop.
def noop(_lowerCamelCase : List[Any] ):
return x
if key is None:
_lowerCAmelCase : Union[str, Any] = noop
# Constants are all uppercase, they go first.
_lowerCAmelCase : Any = [obj for obj in objects if key(_lowerCamelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowerCAmelCase : Union[str, Any] = [obj for obj in objects if key(_lowerCamelCase )[0].isupper() and not key(_lowerCamelCase ).isupper()]
# Functions begin with a lowercase, they go last.
_lowerCAmelCase : Optional[Any] = [obj for obj in objects if not key(_lowerCamelCase )[0].isupper()]
_lowerCAmelCase : List[str] = ignore_underscore(_lowerCamelCase )
return sorted(_lowerCamelCase , key=_lowerCamelCase ) + sorted(_lowerCamelCase , key=_lowerCamelCase ) + sorted(_lowerCamelCase , key=_lowerCamelCase )
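# Illustrative (assumed) example: sort_objects(["b_func", "CONST", "MyClass", "a_func"])
# returns ["CONST", "MyClass", "a_func", "b_func"] -- constants, then classes, then functions.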
def _UpperCAmelCase ( _lowerCamelCase : str ) -> str:
# This inner function sorts imports between [ ].
def _replace(_lowerCamelCase : Union[str, Any] ):
_lowerCAmelCase : Optional[Any] = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
_lowerCAmelCase : List[str] = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : int = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(_lowerCamelCase )] ) + "]"
_lowerCAmelCase : Optional[int] = import_statement.split("""\n""" )
if len(_lowerCamelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowerCAmelCase : Dict = 2 if lines[1].strip() == """[""" else 1
_lowerCAmelCase : Tuple = [(i, _re_strip_line.search(_lowerCamelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_lowerCAmelCase : Tuple = sort_objects(_lowerCamelCase , key=lambda _lowerCamelCase : x[1] )
_lowerCAmelCase : Optional[Any] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_lowerCamelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowerCAmelCase : str = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowerCAmelCase : Tuple = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : Dict = keys[:-1]
_lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + """, """.join([f'"{k}"' for k in sort_objects(_lowerCamelCase )] )
return "\n".join(_lowerCamelCase )
else:
# Finally we have to deal with imports fitting on one line
_lowerCAmelCase : Dict = _re_bracket_content.sub(_replace , _lowerCamelCase )
return import_statement
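# Illustrative (assumed) one-line case:
# sort_objects_in_import('_import_structure["models"] = ["ZClass", "a_func", "CONST"]')
# -> '_import_structure["models"] = ["CONST", "ZClass", "a_func"]'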
def _UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]=True ) -> List[str]:
with open(_lowerCamelCase , """r""" ) as f:
_lowerCAmelCase : Optional[Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(
_lowerCamelCase , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_lowerCamelCase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowerCAmelCase : List[str] = main_blocks[block_idx]
_lowerCAmelCase : int = block.split("""\n""" )
# Get to the start of the imports.
_lowerCAmelCase : Any = 0
while line_idx < len(_lowerCamelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase )
else:
line_idx += 1
if line_idx >= len(_lowerCamelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowerCAmelCase : Any = """\n""".join(block_lines[line_idx:-1] )
_lowerCAmelCase : Tuple = get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
_lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(_lowerCamelCase , indent_level=_lowerCamelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowerCAmelCase : List[Any] = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowerCAmelCase : Tuple = [(pattern.search(_lowerCamelCase ).groups()[0] if pattern.search(_lowerCamelCase ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowerCAmelCase : List[str] = [(i, key) for i, key in enumerate(_lowerCamelCase ) if key is not None]
_lowerCAmelCase : List[str] = [x[0] for x in sorted(_lowerCamelCase , key=lambda _lowerCamelCase : x[1] )]
# We reorder the blocks, leaving empty lines/comments where they were and sorting the rest.
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : List[str] = []
for i in range(len(_lowerCamelCase ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
_lowerCAmelCase : Any = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(_lowerCamelCase )
count += 1
# And we put our main block back together with its first and last line.
_lowerCAmelCase : str = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(_lowerCamelCase ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(_lowerCamelCase , """w""" ) as f:
f.write("""\n""".join(_lowerCamelCase ) )
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any]=True ) -> Any:
_lowerCAmelCase : List[Any] = []
for root, _, files in os.walk(_lowerCamelCase ):
if "__init__.py" in files:
_lowerCAmelCase : List[Any] = sort_imports(os.path.join(_lowerCamelCase , """__init__.py""" ) , check_only=_lowerCamelCase )
if result:
_lowerCAmelCase : str = [os.path.join(_lowerCamelCase , """__init__.py""" )]
if len(_lowerCamelCase ) > 0:
raise ValueError(f'Would overwrite {len(_lowerCamelCase )} files, run `make style`.' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
UpperCamelCase_ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 309
| 1
|
'''simple docstring'''
def _UpperCAmelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : str ) -> list[int]:
_lowerCAmelCase : List[Any] = int(_lowerCamelCase )
# Initialize Result
_lowerCAmelCase : Any = []
# Traverse through all denominations
for denomination in reversed(_lowerCamelCase ):
# Find denominations
while int(_lowerCamelCase ) >= int(_lowerCamelCase ):
total_value -= int(_lowerCamelCase )
answer.append(_lowerCamelCase ) # Append the "answers" array
return answer
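# Illustrative (assumed) run: find_minimum_change([1, 2, 5, 10], "18") -> [10, 5, 2, 1].
# The greedy choice is only guaranteed optimal for canonical coin systems such as Indian
# currency; for denominations {1, 3, 4} and a value of 6 it returns [4, 1, 1] rather than [3, 3].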
# Driver Code
if __name__ == "__main__":
UpperCamelCase_ = []
UpperCamelCase_ = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
UpperCamelCase_ = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'Denomination {i}: ').strip()))
UpperCamelCase_ = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCamelCase_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
UpperCamelCase_ = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'Following is minimal change for {value}: ')
UpperCamelCase_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 309
|
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
UpperCamelCase_ = logging.get_logger(__name__)
class a_ :
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[str] = question_encoder
_lowerCAmelCase : Optional[Any] = generator
_lowerCAmelCase : Optional[Any] = self.question_encoder
def __UpperCamelCase ( self , snake_case_ ):
if os.path.isfile(snake_case_ ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
_lowerCAmelCase : Any = os.path.join(snake_case_ , """question_encoder_tokenizer""" )
_lowerCAmelCase : Tuple = os.path.join(snake_case_ , """generator_tokenizer""" )
self.question_encoder.save_pretrained(snake_case_ )
self.generator.save_pretrained(snake_case_ )
@classmethod
def __UpperCamelCase ( cls , snake_case_ , **snake_case_ ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
_lowerCAmelCase : Dict = kwargs.pop("""config""" , snake_case_ )
if config is None:
_lowerCAmelCase : List[Any] = RagConfig.from_pretrained(snake_case_ )
_lowerCAmelCase : int = AutoTokenizer.from_pretrained(
snake_case_ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
_lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
snake_case_ , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=snake_case_ , generator=snake_case_ )
def __call__( self , *snake_case_ , **snake_case_ ):
return self.current_tokenizer(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
return self.generator.batch_decode(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
return self.generator.decode(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.question_encoder
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = self.generator
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = "longest" , snake_case_ = None , snake_case_ = True , **snake_case_ , ):
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , snake_case_ , )
if max_length is None:
_lowerCAmelCase : Any = self.current_tokenizer.model_max_length
_lowerCAmelCase : List[Any] = self(
snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , max_length=snake_case_ , padding=snake_case_ , truncation=snake_case_ , **snake_case_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
_lowerCAmelCase : List[str] = self.current_tokenizer.model_max_length
_lowerCAmelCase : List[str] = self(
text_target=snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , padding=snake_case_ , max_length=snake_case_ , truncation=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Dict = labels["""input_ids"""]
return model_inputs
| 309
| 1
|
'''simple docstring'''
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCAmelCase ( _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any ) -> Dict:
# Initialise PyTorch model
_lowerCAmelCase : List[Any] = TaConfig.from_json_file(_lowerCamelCase )
print(f'Building PyTorch model from configuration: {config}' )
_lowerCAmelCase : Union[str, Any] = TaForConditionalGeneration(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCamelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 309
|
'''simple docstring'''
# Algorithm for pigeonhole sorting
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] ) -> List[Any]:
_lowerCAmelCase : List[Any] = min(_lowerCamelCase ) # min() finds the minimum value
_lowerCAmelCase : Tuple = max(_lowerCamelCase ) # max() finds the maximum value
_lowerCAmelCase : int = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
_lowerCAmelCase : Dict = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(_lowerCamelCase , _lowerCamelCase ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in sorted order.
_lowerCAmelCase : Any = 0
for count in range(_lowerCamelCase ):
while holes[count] > 0:
holes[count] -= 1
_lowerCAmelCase : Optional[int] = count + min_val
i += 1
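# Illustrative trace of the reference algorithm: pigeonhole_sort([8, 3, 2, 7, 4, 6, 8])
# uses max - min + 1 = 7 holes and writes [2, 3, 4, 6, 7, 8, 8] back in place,
# running in O(n + range) time.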
def _UpperCAmelCase ( ) -> Optional[int]:
_lowerCAmelCase : Optional[int] = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(_lowerCamelCase )
print("""Sorted order is:""" , """ """.join(_lowerCamelCase ) )
if __name__ == "__main__":
main()
| 309
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
"""configuration_lilt""": ["""LILT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LiltConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""LILT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LiltForQuestionAnswering""",
"""LiltForSequenceClassification""",
"""LiltForTokenClassification""",
"""LiltModel""",
"""LiltPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
|
'''simple docstring'''
def _UpperCAmelCase ( _lowerCamelCase : int = 1_00 ) -> int:
_lowerCAmelCase : Optional[Any] = (n * (n + 1) // 2) ** 2
_lowerCAmelCase : str = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
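# Closed forms: the square of the sum is (n * (n + 1) / 2) ** 2 and the sum of the squares
# is n * (n + 1) * (2 * n + 1) / 6, so e.g. solution(10) == 3025 - 385 == 2640.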
if __name__ == "__main__":
print(F'{solution() = }')
| 309
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ (_a , unittest.TestCase ):
__lowerCAmelCase : Tuple = MgpstrTokenizer
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : Tuple = {}
__lowerCAmelCase : Tuple = False
def __UpperCamelCase ( self ):
super().setUp()
# fmt: off
_lowerCAmelCase : List[Any] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
_lowerCAmelCase : Union[str, Any] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(snake_case_ ) + """\n""" )
def __UpperCamelCase ( self , **snake_case_ ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Tuple = """tester"""
_lowerCAmelCase : Optional[int] = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = self.get_tokenizers(do_lower_case=snake_case_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
_lowerCAmelCase : List[str] = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
_lowerCAmelCase : List[str] = tokenizer.encode([special_token] , add_special_tokens=snake_case_ )
self.assertEqual(len(snake_case_ ) , 1 )
_lowerCAmelCase : Any = tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
self.assertTrue(special_token not in decoded )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.get_input_output_texts(snake_case_ )
_lowerCAmelCase : List[str] = tokenizer.tokenize(snake_case_ )
_lowerCAmelCase : List[Any] = tokenizer.convert_tokens_to_ids(snake_case_ )
_lowerCAmelCase : Optional[Any] = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertNotEqual(len(snake_case_ ) , 0 )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(text_a.replace(""" """ , """""" ) , snake_case_ )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def __UpperCamelCase ( self ):
pass
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 1
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
UpperCamelCase_ = 25_00_04
UpperCamelCase_ = 25_00_20
@require_sentencepiece
@require_tokenizers
class a_ (_a , unittest.TestCase ):
__lowerCAmelCase : List[str] = MBartaaTokenizer
__lowerCAmelCase : Optional[Any] = MBartaaTokenizerFast
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : Optional[Any] = True
def __UpperCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : int = MBartaaTokenizer(snake_case_ , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=snake_case_ )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = """<s>"""
_lowerCAmelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(snake_case_ ) , 1_0_5_4 )
def __UpperCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_5_4 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = MBartaaTokenizer(snake_case_ , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=snake_case_ )
_lowerCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(snake_case_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case_ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_lowerCAmelCase : Tuple = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
snake_case_ , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
_lowerCAmelCase : int = tokenizer.convert_tokens_to_ids(snake_case_ )
self.assertListEqual(
snake_case_ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
_lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(
snake_case_ , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def __UpperCamelCase ( self ):
# fmt: off
_lowerCAmelCase : int = {"""input_ids""": [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
def __UpperCamelCase ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_lowerCAmelCase : Optional[int] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
_lowerCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
_lowerCAmelCase : Any = tempfile.mkdtemp()
_lowerCAmelCase : Union[str, Any] = tokenizer_r.save_pretrained(snake_case_ )
_lowerCAmelCase : Union[str, Any] = tokenizer_p.save_pretrained(snake_case_ )
# Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
_lowerCAmelCase : List[str] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(snake_case_ , snake_case_ )
# Checks everything loads correctly in the same way
_lowerCAmelCase : Union[str, Any] = tokenizer_r.from_pretrained(snake_case_ )
_lowerCAmelCase : Any = tokenizer_p.from_pretrained(snake_case_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case_ , snake_case_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(snake_case_ )
# Save tokenizer rust, legacy_format=True
_lowerCAmelCase : Any = tempfile.mkdtemp()
_lowerCAmelCase : Optional[int] = tokenizer_r.save_pretrained(snake_case_ , legacy_format=snake_case_ )
_lowerCAmelCase : List[Any] = tokenizer_p.save_pretrained(snake_case_ )
# Checks it saves with the same files
self.assertSequenceEqual(snake_case_ , snake_case_ )
# Checks everything loads correctly in the same way
_lowerCAmelCase : int = tokenizer_r.from_pretrained(snake_case_ )
_lowerCAmelCase : Any = tokenizer_p.from_pretrained(snake_case_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case_ , snake_case_ ) )
shutil.rmtree(snake_case_ )
# Save tokenizer rust, legacy_format=False
_lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
_lowerCAmelCase : Union[str, Any] = tokenizer_r.save_pretrained(snake_case_ , legacy_format=snake_case_ )
_lowerCAmelCase : Optional[Any] = tokenizer_p.save_pretrained(snake_case_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowerCAmelCase : Dict = tokenizer_r.from_pretrained(snake_case_ )
_lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(snake_case_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case_ , snake_case_ ) )
shutil.rmtree(snake_case_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ (unittest.TestCase ):
__lowerCAmelCase : Dict = """facebook/mbart-large-50-one-to-many-mmt"""
__lowerCAmelCase : str = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
__lowerCAmelCase : str = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
__lowerCAmelCase : List[str] = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]
@classmethod
def __UpperCamelCase ( cls ):
_lowerCAmelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
_lowerCAmelCase : List[str] = 1
return cls
def __UpperCamelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 2_5_0_0_2_0 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] , 2_5_0_0_3_8 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case_ )
def __UpperCamelCase ( self ):
self.assertIn(snake_case_ , self.tokenizer.all_special_ids )
_lowerCAmelCase : Optional[int] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
_lowerCAmelCase : int = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
_lowerCAmelCase : Tuple = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertNotIn(self.tokenizer.eos_token , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , snake_case_ )
_lowerCAmelCase : int = 1_0
_lowerCAmelCase : str = self.tokenizer(snake_case_ , max_length=snake_case_ , truncation=snake_case_ ).input_ids[0]
self.assertEqual(ids[0] , snake_case_ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(snake_case_ ) , snake_case_ )
def __UpperCamelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_0_0_5_3, 2_5_0_0_0_1] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = tempfile.mkdtemp()
_lowerCAmelCase : str = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(snake_case_ )
_lowerCAmelCase : List[Any] = MBartaaTokenizer.from_pretrained(snake_case_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case_ )
@require_torch
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case_ , return_tensors="""pt""" )
_lowerCAmelCase : Any = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=snake_case_ , truncation=snake_case_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
_lowerCAmelCase : Optional[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
_lowerCAmelCase : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , snake_case_ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.tokenizer(self.src_text , padding=snake_case_ , truncation=snake_case_ , max_length=3 , return_tensors="""pt""" )
_lowerCAmelCase : str = self.tokenizer(
text_target=self.tgt_text , padding=snake_case_ , truncation=snake_case_ , max_length=1_0 , return_tensors="""pt""" )
_lowerCAmelCase : str = targets["""input_ids"""]
_lowerCAmelCase : Optional[int] = shift_tokens_right(snake_case_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(snake_case_ ) , {
# en_XX, A, test, EOS
"""input_ids""": [[2_5_0_0_0_4, 6_2, 3_0_3_4, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_0_0_0_1,
} , )
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
UpperCamelCase_ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""DPTFeatureExtractor"""]
UpperCamelCase_ = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 1
|
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
UpperCamelCase_ = """hf-internal-testing/tiny-random-bert"""
UpperCamelCase_ = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
UpperCamelCase_ = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = cached_file(snake_case_ , snake_case_ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(snake_case_ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(snake_case_ , snake_case_ ) ) )
with open(os.path.join(snake_case_ , """refs""" , """main""" ) ) as f:
_lowerCAmelCase : Tuple = f.read()
self.assertEqual(snake_case_ , os.path.join(snake_case_ , """snapshots""" , snake_case_ , snake_case_ ) )
self.assertTrue(os.path.isfile(snake_case_ ) )
# File is cached at the same place the second time.
_lowerCAmelCase : List[Any] = cached_file(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
# Using a specific revision to test the full commit hash.
_lowerCAmelCase : int = cached_file(snake_case_ , snake_case_ , revision="""9b8c223""" )
self.assertEqual(snake_case_ , os.path.join(snake_case_ , """snapshots""" , snake_case_ , snake_case_ ) )
def __UpperCamelCase ( self ):
with self.assertRaisesRegex(snake_case_ , """is not a valid model identifier""" ):
_lowerCAmelCase : str = cached_file("""tiny-random-bert""" , snake_case_ )
with self.assertRaisesRegex(snake_case_ , """is not a valid git identifier""" ):
_lowerCAmelCase : int = cached_file(snake_case_ , snake_case_ , revision="""aaaa""" )
with self.assertRaisesRegex(snake_case_ , """does not appear to have a file named""" ):
_lowerCAmelCase : List[Any] = cached_file(snake_case_ , """conf""" )
def __UpperCamelCase ( self ):
with self.assertRaisesRegex(snake_case_ , """does not appear to have a file named""" ):
_lowerCAmelCase : Tuple = cached_file(snake_case_ , """conf""" )
with open(os.path.join(snake_case_ , """refs""" , """main""" ) ) as f:
_lowerCAmelCase : Optional[int] = f.read()
self.assertTrue(os.path.isfile(os.path.join(snake_case_ , """.no_exist""" , snake_case_ , """conf""" ) ) )
_lowerCAmelCase : Optional[Any] = cached_file(snake_case_ , """conf""" , _raise_exceptions_for_missing_entries=snake_case_ )
self.assertIsNone(snake_case_ )
_lowerCAmelCase : Optional[int] = cached_file(snake_case_ , """conf""" , local_files_only=snake_case_ , _raise_exceptions_for_missing_entries=snake_case_ )
self.assertIsNone(snake_case_ )
_lowerCAmelCase : Tuple = mock.Mock()
_lowerCAmelCase : str = 5_0_0
_lowerCAmelCase : Dict = {}
_lowerCAmelCase : Tuple = HTTPError
_lowerCAmelCase : Optional[int] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=snake_case_ ) as mock_head:
_lowerCAmelCase : str = cached_file(snake_case_ , """conf""" , _raise_exceptions_for_connection_errors=snake_case_ )
self.assertIsNone(snake_case_ )
            # This checks that we actually called the fake head request
mock_head.assert_called()
def __UpperCamelCase ( self ):
self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , snake_case_ ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , snake_case_ ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , snake_case_ ) )
def __UpperCamelCase ( self ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt""" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(snake_case_ , """is not a valid model identifier""" ):
get_file_from_repo("""bert-base-case""" , snake_case_ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(snake_case_ , """is not a valid git identifier""" ):
get_file_from_repo("""bert-base-cased""" , snake_case_ , revision="""ahaha""" )
_lowerCAmelCase : Optional[Any] = get_file_from_repo("""bert-base-cased""" , snake_case_ )
# The name is the cached name which is not very easy to test, so instead we load the content.
_lowerCAmelCase : int = json.loads(open(snake_case_ , """r""" ).read() )
self.assertEqual(config["""hidden_size"""] , 7_6_8 )
def __UpperCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : List[Any] = Path(snake_case_ ) / """a.txt"""
filename.touch()
self.assertEqual(get_file_from_repo(snake_case_ , """a.txt""" ) , str(snake_case_ ) )
self.assertIsNone(get_file_from_repo(snake_case_ , """b.txt""" ) )
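# --- Illustrative sketch, not part of the original tests ---
# How the utilities under test are typically called. The repo id is the same tiny
# test model used above; the first call needs network access, after which the file
# is served from the local HF cache.
from transformers.utils import cached_file, get_file_from_repo

resolved = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
print(resolved)  # .../models--hf-internal-testing--tiny-random-bert/snapshots/<sha>/config.json
# get_file_from_repo returns None instead of raising when the file is absent:
assert get_file_from_repo("hf-internal-testing/tiny-random-bert", "missing.txt") is None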
'''simple docstring'''
from __future__ import annotations
import numpy as np
def _UpperCAmelCase ( _lowerCamelCase : list[float] ) -> np.ndarray:
    # Element-wise ReLU: returns max(0, x) for every entry of the input vector.
    return np.maximum(0 , _lowerCamelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
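# --- Illustrative sketch, not part of the original snippet ---
# Because np.maximum is element-wise, relu works on arrays of any shape; its usual
# companion in backpropagation is the derivative below (the subgradient at 0 is
# taken to be 0).
def relu_derivative(vector: np.ndarray) -> np.ndarray:
    return (vector > 0).astype(float)

print(relu_derivative(np.array([-1.0, 0.0, 5.0])))  # --> [0. 0. 1.]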
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class a_ :
def __init__( self ):
_lowerCAmelCase : Any = """"""
_lowerCAmelCase : List[Any] = """"""
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : int = 0
_lowerCAmelCase : str = 2_5_6
_lowerCAmelCase : List[str] = 0
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Dict = 0
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : str = cva.imread(snake_case_ , 0 )
_lowerCAmelCase : List[str] = copy.deepcopy(self.img )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="""x""" )
_lowerCAmelCase : List[Any] = np.sum(snake_case_ )
for i in range(len(snake_case_ ) ):
_lowerCAmelCase : Optional[int] = x[i] / self.k
self.sk += prk
_lowerCAmelCase : Any = (self.L - 1) * self.sk
if self.rem != 0:
_lowerCAmelCase : Dict = int(last % last )
_lowerCAmelCase : str = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(snake_case_ )
_lowerCAmelCase : str = int(np.ma.count(self.img ) / self.img[1].size )
_lowerCAmelCase : Union[str, Any] = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
_lowerCAmelCase : Union[str, Any] = self.img[j][i]
if num != self.last_list[num]:
_lowerCAmelCase : List[str] = self.last_list[num]
cva.imwrite("""output_data/output.jpg""" , self.img )
def __UpperCamelCase ( self ):
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def __UpperCamelCase ( self ):
cva.imshow("""Output-Image""" , self.img )
cva.imshow("""Input-Image""" , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
    UpperCamelCase_ = os.path.join(os.path.dirname(__file__), """image_data/input.jpg""")
UpperCamelCase_ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
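# --- Illustrative sketch, not part of the original class ---
# The class above performs histogram equalization: build the grayscale histogram,
# accumulate it into a CDF, scale by (L - 1), and remap every pixel. The same
# computation in a few NumPy lines, assuming an 8-bit grayscale array:
def equalize(img: np.ndarray) -> np.ndarray:
    hist, _ = np.histogram(img.ravel(), bins=256, range=(0, 256))
    cdf = hist.cumsum() / img.size              # normalized cumulative histogram s_k
    lut = np.round(255 * cdf).astype(np.uint8)  # (L - 1) * s_k, rounded to integers
    return lut[img]                             # remap each pixel through the lookup table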
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class a_ (_a ):
def __init__( self , *snake_case_ , **snake_case_ ):
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" , snake_case_ , )
super().__init__(*snake_case_ , **snake_case_ )
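# --- Illustrative sketch, not part of the original file ---
# The shim above is the standard deprecation-alias pattern: the old class name stays
# importable but warns on construction (the masked second argument to warnings.warn
# is the warning category, FutureWarning in the library). A generic version with
# hypothetical names:
import warnings

class NewProcessor:
    pass

class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.", FutureWarning
        )
        super().__init__(*args, **kwargs)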
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(_a )
class a_ (_a ):
def __init__( self , *snake_case_ , **snake_case_ ):
super().__init__(*snake_case_ , **snake_case_ )
requires_backends(self , """vision""" )
self.check_model_type(snake_case_ )
def __call__( self , snake_case_ , **snake_case_ ):
return super().__call__(snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , **snake_case_ ):
return {}, {}, {}
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Optional[Any] = load_image(snake_case_ )
_lowerCAmelCase : Dict = image.size
_lowerCAmelCase : str = self.image_processor(images=snake_case_ , return_tensors=self.framework )
return model_inputs
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Optional[Any] = self.model(**snake_case_ )
return model_outputs
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : int = model_outputs.predicted_depth
_lowerCAmelCase : List[str] = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=snake_case_ )
_lowerCAmelCase : List[Any] = prediction.squeeze().cpu().numpy()
_lowerCAmelCase : List[Any] = (output * 2_5_5 / np.max(snake_case_ )).astype("""uint8""" )
_lowerCAmelCase : Union[str, Any] = Image.fromarray(snake_case_ )
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : str = predicted_depth
_lowerCAmelCase : List[str] = depth
return output_dict
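# --- Illustrative usage sketch, not part of the pipeline implementation ---
# Typical invocation through the high-level pipeline API; the checkpoint name is an
# assumption (any depth-estimation model such as a DPT variant works).
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
result["depth"].save("depth.png")       # PIL image assembled in postprocess() above
print(result["predicted_depth"].shape)  # raw per-pixel depth tensor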
| 309
|
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class a_ (_a ):
__lowerCAmelCase : Dict = (DPMSolverSDEScheduler,)
__lowerCAmelCase : Dict = 1_0
def __UpperCamelCase ( self , **snake_case_ ):
_lowerCAmelCase : List[Any] = {
"""num_train_timesteps""": 1_1_0_0,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""noise_sampler_seed""": 0,
}
config.update(**snake_case_ )
return config
def __UpperCamelCase ( self ):
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def __UpperCamelCase ( self ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=snake_case_ , beta_end=snake_case_ )
def __UpperCamelCase ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=snake_case_ )
def __UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.scheduler_classes[0]
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : Any = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : Tuple = self.dummy_model()
_lowerCAmelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : Optional[Any] = sample.to(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Union[str, Any] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : Union[str, Any] = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = output.prev_sample
_lowerCAmelCase : List[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1E-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.scheduler_classes[0]
_lowerCAmelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
_lowerCAmelCase : Dict = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : int = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : int = sample.to(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : List[str] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : List[Any] = model(snake_case_ , snake_case_ )
_lowerCAmelCase : str = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : int = output.prev_sample
_lowerCAmelCase : str = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Optional[int] = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1E-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1E-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1E-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : str = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case_ )
_lowerCAmelCase : Tuple = self.dummy_model()
_lowerCAmelCase : Optional[int] = self.dummy_sample_deter.to(snake_case_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_lowerCAmelCase : str = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = output.prev_sample
_lowerCAmelCase : List[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1E-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = self.scheduler_classes[0]
_lowerCAmelCase : Optional[int] = self.get_scheduler_config()
_lowerCAmelCase : Tuple = scheduler_class(**snake_case_ , use_karras_sigmas=snake_case_ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case_ )
_lowerCAmelCase : List[Any] = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter.to(snake_case_ ) * scheduler.init_noise_sigma
_lowerCAmelCase : Optional[int] = sample.to(snake_case_ )
for t in scheduler.timesteps:
_lowerCAmelCase : List[str] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : int = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[int] = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : str = output.prev_sample
_lowerCAmelCase : Optional[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
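# --- Illustrative sketch, not part of the test suite ---
# The loop the tests exercise, written out once: scale the sample, predict the
# noise, and step the scheduler. `model` is a trivial placeholder standing in for a
# real UNet so the snippet is self-contained (requires the torchsde extra).
import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
model = lambda x, t: torch.zeros_like(x)  # placeholder for a UNet forward pass
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample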
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class a_ (_a ):
def __init__( self , *snake_case_ , **snake_case_ ):
warnings.warn(
"""The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use PoolFormerImageProcessor instead.""" , snake_case_ , )
super().__init__(*snake_case_ , **snake_case_ )
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"""vocab_file""": """vocab.txt"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
UpperCamelCase_ = {
"""YituTech/conv-bert-base""": 5_12,
"""YituTech/conv-bert-medium-small""": 5_12,
"""YituTech/conv-bert-small""": 5_12,
}
UpperCamelCase_ = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class a_ (_a ):
__lowerCAmelCase : Any = VOCAB_FILES_NAMES
__lowerCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Optional[int] = ConvBertTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_=True , snake_case_=None , **snake_case_ , ):
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
_lowerCAmelCase : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , snake_case_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , snake_case_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , snake_case_ ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(snake_case_ , normalizer_state.pop("""type""" ) )
_lowerCAmelCase : List[str] = do_lower_case
_lowerCAmelCase : str = strip_accents
_lowerCAmelCase : List[Any] = tokenize_chinese_chars
_lowerCAmelCase : List[Any] = normalizer_class(**snake_case_ )
_lowerCAmelCase : str = do_lower_case
def __UpperCamelCase ( self , snake_case_ , snake_case_=None ):
_lowerCAmelCase : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Optional[Any] = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Any = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
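# --- Illustrative usage sketch, not part of the tokenizer definition ---
# The two methods above implement the BERT-style layout: [CLS] A [SEP] for a single
# sequence, [CLS] A [SEP] B [SEP] for a pair, with token_type_ids 0 over the first
# segment and 1 over the second (needs network access the first time):
from transformers import ConvBertTokenizerFast

tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
enc = tok("first sentence", "second sentence")
print(tok.convert_ids_to_tokens(enc["input_ids"]))  # [CLS] ... [SEP] ... [SEP]
print(enc["token_type_ids"])                        # 0s over segment A, 1s over segment B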
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"""vocab_file""": """spiece.model"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""bert_for_seq_generation""": (
"""https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
),
}
}
UpperCamelCase_ = {"""bert_for_seq_generation""": 5_12}
class a_ (_a ):
__lowerCAmelCase : int = VOCAB_FILES_NAMES
__lowerCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : List[int] = []
__lowerCAmelCase : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self , snake_case_ , snake_case_="<s>" , snake_case_="</s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<::::>" , snake_case_ = None , **snake_case_ , ):
_lowerCAmelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , sep_token=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , )
_lowerCAmelCase : Any = vocab_file
_lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
@property
def __UpperCamelCase ( self ):
return self.sp_model.get_piece_size()
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_lowerCAmelCase : Union[str, Any] = self.__dict__.copy()
_lowerCAmelCase : Optional[Any] = None
return state
def __setstate__( self , snake_case_ ):
_lowerCAmelCase : List[str] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : Optional[Any] = {}
_lowerCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCamelCase ( self , snake_case_ ):
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
return self.sp_model.piece_to_id(snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Optional[Any] = self.sp_model.IdToPiece(snake_case_ )
return token
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : Dict = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case_ ) + token
_lowerCAmelCase : Union[str, Any] = []
else:
current_sub_tokens.append(snake_case_ )
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
if not os.path.isdir(snake_case_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCAmelCase : List[Any] = os.path.join(
snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , """wb""" ) as fi:
_lowerCAmelCase : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,)
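# --- Illustrative sketch, not part of the tokenizer ---
# The class above is a thin wrapper over a SentencePiece model: encode text to
# subword pieces, map pieces to ids, decode back. The same round trip with the raw
# sentencepiece API; the model path is an assumption (any trained model works):
import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("spiece.model")
pieces = sp.encode("Hello world", out_type=str)
ids = [sp.piece_to_id(p) for p in pieces]
assert sp.decode(pieces).strip() == "Hello world"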
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCamelCase_ = logging.get_logger(__name__)
class a_ (_a ):
__lowerCAmelCase : Optional[Any] = """upernet"""
def __init__( self , snake_case_=None , snake_case_=5_1_2 , snake_case_=0.02 , snake_case_=[1, 2, 3, 6] , snake_case_=True , snake_case_=0.4 , snake_case_=3_8_4 , snake_case_=2_5_6 , snake_case_=1 , snake_case_=False , snake_case_=2_5_5 , **snake_case_ , ):
super().__init__(**snake_case_ )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
_lowerCAmelCase : Dict = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(snake_case_ , snake_case_ ):
_lowerCAmelCase : int = backbone_config.get("""model_type""" )
_lowerCAmelCase : int = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase : int = config_class.from_dict(snake_case_ )
_lowerCAmelCase : int = backbone_config
_lowerCAmelCase : List[str] = hidden_size
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : List[Any] = pool_scales
_lowerCAmelCase : Dict = use_auxiliary_head
_lowerCAmelCase : Union[str, Any] = auxiliary_loss_weight
_lowerCAmelCase : Union[str, Any] = auxiliary_in_channels
_lowerCAmelCase : Any = auxiliary_channels
_lowerCAmelCase : int = auxiliary_num_convs
_lowerCAmelCase : List[str] = auxiliary_concat_input
_lowerCAmelCase : List[Any] = loss_ignore_index
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowerCAmelCase : Tuple = self.backbone_config.to_dict()
_lowerCAmelCase : Tuple = self.__class__.model_type
return output
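# --- Illustrative usage sketch, not part of the config class ---
# Passing a plain dict as backbone_config exercises the dict branch above, which
# looks up the backbone config class by its `model_type` key (assumes a
# transformers version where ConvNext exposes the backbone API):
from transformers import UperNetConfig

config = UperNetConfig(
    backbone_config={"model_type": "convnext", "out_features": ["stage1", "stage2", "stage3", "stage4"]}
)
print(config.backbone_config.model_type)                  # "convnext"
print(config.to_dict()["backbone_config"]["model_type"])  # serialized by the method above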
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase_ = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
"""configuration_clap""": [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapAudioConfig""",
"""ClapConfig""",
"""ClapTextConfig""",
],
"""processing_clap""": ["""ClapProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapModel""",
"""ClapPreTrainedModel""",
"""ClapTextModel""",
"""ClapTextModelWithProjection""",
"""ClapAudioModel""",
"""ClapAudioModelWithProjection""",
]
UpperCamelCase_ = ["""ClapFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
UpperCamelCase_ = 0
UpperCamelCase_ = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
UpperCamelCase_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
UpperCamelCase_ = tuple[int, int]
class a_ :
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_lowerCAmelCase : Optional[int] = pos_x
_lowerCAmelCase : List[str] = pos_y
_lowerCAmelCase : Tuple = (pos_y, pos_x)
_lowerCAmelCase : List[Any] = goal_x
_lowerCAmelCase : int = goal_y
_lowerCAmelCase : Union[str, Any] = g_cost
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : List[Any] = self.calculate_heuristic()
_lowerCAmelCase : Optional[int] = self.g_cost + self.h_cost
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.pos_x - self.goal_x
_lowerCAmelCase : Optional[int] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(snake_case_ ) + abs(snake_case_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , snake_case_ ):
return self.f_cost < other.f_cost
class a_ :
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : Optional[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , snake_case_ )
_lowerCAmelCase : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , snake_case_ )
_lowerCAmelCase : List[str] = [self.start]
_lowerCAmelCase : list[Node] = []
_lowerCAmelCase : List[str] = False
def __UpperCamelCase ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_lowerCAmelCase : Optional[int] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(snake_case_ )
self.closed_nodes.append(snake_case_ )
_lowerCAmelCase : Optional[int] = self.get_successors(snake_case_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(snake_case_ )
else:
# retrieve the best current path
_lowerCAmelCase : Optional[Any] = self.open_nodes.pop(self.open_nodes.index(snake_case_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(snake_case_ )
else:
self.open_nodes.append(snake_case_ )
return [self.start.pos]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Union[str, Any] = []
for action in delta:
_lowerCAmelCase : Union[str, Any] = parent.pos_x + action[1]
_lowerCAmelCase : Dict = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(snake_case_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
snake_case_ , snake_case_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , snake_case_ , ) )
return successors
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[Any] = node
_lowerCAmelCase : Optional[Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_lowerCAmelCase : Optional[int] = current_node.parent
path.reverse()
return path
class a_ :
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[str] = AStar(snake_case_ , snake_case_ )
_lowerCAmelCase : int = AStar(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[int] = False
def __UpperCamelCase ( self ):
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
_lowerCAmelCase : Tuple = self.fwd_astar.open_nodes.pop(0 )
_lowerCAmelCase : Optional[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
snake_case_ , snake_case_ )
self.fwd_astar.closed_nodes.append(snake_case_ )
self.bwd_astar.closed_nodes.append(snake_case_ )
_lowerCAmelCase : List[str] = current_bwd_node
_lowerCAmelCase : Dict = current_fwd_node
_lowerCAmelCase : Any = {
self.fwd_astar: self.fwd_astar.get_successors(snake_case_ ),
self.bwd_astar: self.bwd_astar.get_successors(snake_case_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(snake_case_ )
else:
# retrieve the best current path
_lowerCAmelCase : List[Any] = astar.open_nodes.pop(
astar.open_nodes.index(snake_case_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(snake_case_ )
else:
astar.open_nodes.append(snake_case_ )
return [self.fwd_astar.start.pos]
def __UpperCamelCase ( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : int = self.fwd_astar.retrace_path(snake_case_ )
_lowerCAmelCase : Optional[Any] = self.bwd_astar.retrace_path(snake_case_ )
bwd_path.pop()
bwd_path.reverse()
_lowerCAmelCase : Dict = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
UpperCamelCase_ = (0, 0)
UpperCamelCase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
UpperCamelCase_ = time.time()
UpperCamelCase_ = AStar(init, goal)
UpperCamelCase_ = a_star.search()
UpperCamelCase_ = time.time() - start_time
print(F'AStar execution time = {end_time:f} seconds')
UpperCamelCase_ = time.time()
    UpperCamelCase_ = BidirectionalAStar(init, goal)
    UpperCamelCase_ = bidir_astar.search()
    UpperCamelCase_ = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
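# --- Illustrative sketch, not part of the original module ---
# Node ordering is driven by f = g + h: g is the path cost so far (each move costs
# 1) and h is the Manhattan or Euclidean distance to the goal. For a node at
# (y=2, x=3) with goal (6, 6) and g = 5:
dx, dy = 3 - 6, 2 - 6
print(abs(dx) + abs(dy) + 5)    # 12: Manhattan heuristic (HEURISTIC == 1)
print(sqrt(dx**2 + dy**2) + 5)  # 10.0: Euclidean heuristic (HEURISTIC == 0)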
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
"""configuration_upernet""": ["""UperNetConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""UperNetForSemanticSegmentation""",
"""UperNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
def _UpperCAmelCase ( denominations : list[int] , value : str ) -> list[int]:
    _lowerCAmelCase : List[Any] = int(value )
    # Initialize result
    _lowerCAmelCase : Any = []
    # Traverse the denominations, largest first
    for denomination in reversed(denominations ):
        # Take the current denomination while it still fits in the remaining value
        while total_value >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append the coin to the "answer" list
    return answer
# Driver Code
if __name__ == "__main__":
UpperCamelCase_ = []
UpperCamelCase_ = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
UpperCamelCase_ = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'Denomination {i}: ').strip()))
UpperCamelCase_ = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCamelCase_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
UpperCamelCase_ = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'Following is minimal change for {value}: ')
UpperCamelCase_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
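# --- Illustrative sketch, not part of the original module ---
# The greedy walk above repeatedly subtracts the largest denomination that still
# fits. For value 93 with denominations [1, 2, 5, 10, 20, 50]:
#   93 -> 50 (43 left) -> 20 (23) -> 20 (3) -> 2 (1) -> 1 (0)
print(find_minimum_change([1, 2, 5, 10, 20, 50], "93"))  # [50, 20, 20, 2, 1]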
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class a_ (_a ):
__lowerCAmelCase : List[Any] = """gpt_neox"""
def __init__( self , snake_case_=5_0_4_3_2 , snake_case_=6_1_4_4 , snake_case_=4_4 , snake_case_=6_4 , snake_case_=2_4_5_7_6 , snake_case_="gelu" , snake_case_=0.25 , snake_case_=1_0_0_0_0 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=2_0_4_8 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_=True , snake_case_=0 , snake_case_=2 , snake_case_=False , snake_case_=True , snake_case_=None , **snake_case_ , ):
super().__init__(bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : str = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : str = rotary_pct
_lowerCAmelCase : Union[str, Any] = rotary_emb_base
_lowerCAmelCase : List[str] = attention_dropout
_lowerCAmelCase : int = hidden_dropout
_lowerCAmelCase : List[Any] = classifier_dropout
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Any = use_cache
_lowerCAmelCase : str = tie_word_embeddings
_lowerCAmelCase : Union[str, Any] = use_parallel_residual
_lowerCAmelCase : Any = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"""The hidden size is not divisble by the number of attention heads! Make sure to update them!""" )
def __UpperCamelCase ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , snake_case_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f'got {self.rope_scaling}' )
_lowerCAmelCase : List[Any] = self.rope_scaling.get("""type""" , snake_case_ )
_lowerCAmelCase : List[str] = self.rope_scaling.get("""factor""" , snake_case_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(snake_case_ , snake_case_ ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
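# --- Illustrative usage sketch, not part of the config class ---
# The validation above accepts exactly two keys: `type` in {"linear", "dynamic"}
# and a float `factor` strictly greater than 1; anything else raises ValueError.
from transformers import GPTNeoXConfig

ok = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
try:
    GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 0.5})
except ValueError as err:
    print(err)  # factor must be a float > 1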
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
UpperCamelCase_ = logging.getLogger(__name__)
@dataclass
class a_ (_a ):
__lowerCAmelCase : Optional[float] = field(
default=0.0 , metadata={"""help""": """The label smoothing epsilon to apply (if not zero)."""} )
    __lowerCAmelCase : bool = field(default=_a , metadata={"""help""": """Whether to use SortishSampler or not."""} )
__lowerCAmelCase : bool = field(
default=_a , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
    __lowerCAmelCase : bool = field(default=_a , metadata={"""help""": """Whether to use Adafactor."""} )
__lowerCAmelCase : Optional[float] = field(
default=_a , metadata={"""help""": """Encoder layer dropout probability. Goes into model.config."""} )
__lowerCAmelCase : Optional[float] = field(
default=_a , metadata={"""help""": """Decoder layer dropout probability. Goes into model.config."""} )
__lowerCAmelCase : Optional[float] = field(default=_a , metadata={"""help""": """Dropout probability. Goes into model.config."""} )
__lowerCAmelCase : Optional[float] = field(
default=_a , metadata={"""help""": """Attention dropout probability. Goes into model.config."""} )
__lowerCAmelCase : Optional[str] = field(
default="""linear""" , metadata={"""help""": f"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = """laion/clap-htsat-unfused"""
_lowerCAmelCase : int = tempfile.mkdtemp()
def __UpperCamelCase ( self , **snake_case_ ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self , **snake_case_ ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = self.get_feature_extractor()
_lowerCAmelCase : Union[str, Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase : int = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
_lowerCAmelCase : Dict = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_feature_extractor()
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Union[str, Any] = floats_list((3, 1_0_0_0) )
_lowerCAmelCase : List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
_lowerCAmelCase : Optional[Any] = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_feature_extractor()
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Tuple = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Union[str, Any] = """This is a test string"""
_lowerCAmelCase : Union[str, Any] = processor(text=snake_case_ )
_lowerCAmelCase : Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = self.get_feature_extractor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[Any] = processor.batch_decode(snake_case_ )
_lowerCAmelCase : Dict = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.get_feature_extractor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
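# --- Illustrative usage sketch, not part of the tests ---
# What the processor under test does in practice: route text to the tokenizer and
# audio to the feature extractor behind a single entry point (needs network access
# the first time; CLAP's feature extractor expects 48 kHz mono audio).
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
audio = np.random.randn(48_000)  # ~1 second of placeholder audio
inputs = processor(text="a dog barking", audios=audio, return_tensors="pt")
print(sorted(inputs.keys()))  # input_ids/attention_mask from text, input_features from audio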
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"""vocab_file""": """spiece.model"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
UpperCamelCase_ = {
"""albert-base-v1""": 5_12,
"""albert-large-v1""": 5_12,
"""albert-xlarge-v1""": 5_12,
"""albert-xxlarge-v1""": 5_12,
"""albert-base-v2""": 5_12,
"""albert-large-v2""": 5_12,
"""albert-xlarge-v2""": 5_12,
"""albert-xxlarge-v2""": 5_12,
}
UpperCamelCase_ = """▁"""
class a_ (_a ):
__lowerCAmelCase : Tuple = VOCAB_FILES_NAMES
__lowerCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , snake_case_ , snake_case_=True , snake_case_=True , snake_case_=False , snake_case_="[CLS]" , snake_case_="[SEP]" , snake_case_="<unk>" , snake_case_="[SEP]" , snake_case_="<pad>" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_ = None , **snake_case_ , ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        # and is included in the raw text; there should be a match in a non-normalized sentence.
_lowerCAmelCase : List[Any] = (
AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ , normalized=snake_case_ )
if isinstance(snake_case_ , snake_case_ )
else mask_token
)
_lowerCAmelCase : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=snake_case_ , remove_space=snake_case_ , keep_accents=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , )
_lowerCAmelCase : Any = do_lower_case
_lowerCAmelCase : int = remove_space
_lowerCAmelCase : List[str] = keep_accents
_lowerCAmelCase : Any = vocab_file
_lowerCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
@property
def __UpperCamelCase ( self ):
return len(self.sp_model )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_lowerCAmelCase : Union[str, Any] = self.__dict__.copy()
_lowerCAmelCase : Tuple = None
return state
def __setstate__( self , snake_case_ ):
_lowerCAmelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : Optional[Any] = {}
_lowerCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCamelCase ( self , snake_case_ ):
if self.remove_space:
_lowerCAmelCase : Optional[int] = """ """.join(inputs.strip().split() )
else:
_lowerCAmelCase : str = inputs
_lowerCAmelCase : int = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_lowerCAmelCase : Union[str, Any] = unicodedata.normalize("""NFKD""" , snake_case_ )
_lowerCAmelCase : List[Any] = """""".join([c for c in outputs if not unicodedata.combining(snake_case_ )] )
if self.do_lower_case:
_lowerCAmelCase : Optional[int] = outputs.lower()
return outputs
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Optional[int] = self.preprocess_text(snake_case_ )
_lowerCAmelCase : Any = self.sp_model.encode(snake_case_ , out_type=snake_case_ )
_lowerCAmelCase : int = []
for piece in pieces:
if len(snake_case_ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_lowerCAmelCase : Optional[int] = self.sp_model.EncodeAsPieces(piece[:-1].replace(snake_case_ , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase : Optional[Any] = cur_pieces[1:]
else:
_lowerCAmelCase : Tuple = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(snake_case_ )
else:
new_pieces.append(snake_case_ )
return new_pieces
def __UpperCamelCase ( self , snake_case_ ):
return self.sp_model.PieceToId(snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
return self.sp_model.IdToPiece(snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Any = """"""
_lowerCAmelCase : List[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case_ ) + token
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : Union[str, Any] = []
else:
current_sub_tokens.append(snake_case_ )
_lowerCAmelCase : str = False
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
if token_ids_a is not None:
return [1] + ([0] * len(snake_case_ )) + [1] + ([0] * len(snake_case_ )) + [1]
return [1] + ([0] * len(snake_case_ )) + [1]
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
if not os.path.isdir(snake_case_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCAmelCase : List[str] = os.path.join(
snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , """wb""" ) as fi:
_lowerCAmelCase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,)
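# --- Illustrative sketch, not part of the tokenizer ---
# The tokenize override above post-processes SentencePiece output so that a
# trailing digit+comma piece such as "9," is split back into "9" and ",".
# Round trip through the public API (needs network access and sentencepiece):
from transformers import AlbertTokenizer

tok = AlbertTokenizer.from_pretrained("albert-base-v2")
pieces = tok.tokenize("It costs 1,000 dollars.")
print(pieces)  # SentencePiece subwords, with digits split around the comma
print(tok.convert_tokens_to_string(pieces))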
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
UpperCamelCase_ = {
"""facebook/m2m100_418M""": 10_24,
}
# fmt: off
UpperCamelCase_ = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class a_ (_a ):
__lowerCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = ["""input_ids""", """attention_mask"""]
__lowerCAmelCase : List[int] = []
__lowerCAmelCase : List[int] = []
def __init__( self , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<pad>" , snake_case_="<unk>" , snake_case_="m2m100" , snake_case_ = None , snake_case_=8 , **snake_case_ , ):
_lowerCAmelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCAmelCase : Optional[Any] = language_codes
_lowerCAmelCase : Tuple = FAIRSEQ_LANGUAGE_CODES[language_codes]
_lowerCAmelCase : str = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
_lowerCAmelCase : int = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(snake_case_ )
for lang_code in fairseq_language_code
if self.get_lang_token(snake_case_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=snake_case_ , tgt_lang=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , language_codes=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Optional[int] = vocab_file
_lowerCAmelCase : Any = load_json(snake_case_ )
_lowerCAmelCase : str = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Union[str, Any] = spm_file
_lowerCAmelCase : Tuple = load_spm(snake_case_ , self.sp_model_kwargs )
_lowerCAmelCase : int = len(self.encoder )
_lowerCAmelCase : Union[str, Any] = {
self.get_lang_token(snake_case_ ): self.encoder_size + i for i, lang_code in enumerate(snake_case_ )
}
_lowerCAmelCase : List[str] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(snake_case_ )}
_lowerCAmelCase : Optional[Any] = {v: k for k, v in self.lang_token_to_id.items()}
_lowerCAmelCase : Any = src_lang if src_lang is not None else """en"""
_lowerCAmelCase : Optional[int] = tgt_lang
_lowerCAmelCase : Tuple = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_lowerCAmelCase : List[Any] = num_madeup_words
@property
def __UpperCamelCase ( self ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __UpperCamelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
    def __UpperCamelCase ( self , snake_case_ ):
        # Tokenize raw text into subword pieces; ``out_type=str`` makes the
        # SentencePiece model return string pieces rather than ids.
        return self.sp_model.encode(snake_case_ , out_type=str )
def __UpperCamelCase ( self , snake_case_ ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(snake_case_ , self.encoder[self.unk_token] )
def __UpperCamelCase ( self , snake_case_ ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(snake_case_ , self.unk_token )
    def __UpperCamelCase ( self , snake_case_ ):
        _lowerCAmelCase : Any = []
        _lowerCAmelCase : Optional[int] = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                _lowerCAmelCase : Optional[Any] = []
            else:
                current_sub_tokens.append(snake_case_ )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
_lowerCAmelCase : List[Any] = [1] * len(self.prefix_tokens )
_lowerCAmelCase : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case_ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case_ )) + ([0] * len(snake_case_ )) + suffix_ones
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_lowerCAmelCase : int = self.__dict__.copy()
_lowerCAmelCase : str = None
return state
def __setstate__( self , snake_case_ ):
_lowerCAmelCase : List[str] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : str = {}
_lowerCAmelCase : str = load_spm(self.spm_file , self.sp_model_kwargs )
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Dict = Path(snake_case_ )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , snake_case_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , snake_case_ )
elif not os.path.isfile(self.spm_file ):
with open(snake_case_ , """wb""" ) as fi:
_lowerCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
        return (str(vocab_save_path ), str(spm_save_path ))
def __UpperCamelCase ( self , snake_case_ , snake_case_ = "en" , snake_case_ = None , snake_case_ = "ro" , **snake_case_ , ):
_lowerCAmelCase : Union[str, Any] = src_lang
_lowerCAmelCase : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(snake_case_ , snake_case_ , **snake_case_ )
    def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ):
        # Used by the translation pipeline: tokenize the raw inputs and record
        # the target-language id so generation is forced to start with it.
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        _lowerCAmelCase : Dict = src_lang
        _lowerCAmelCase : str = self(snake_case_ , add_special_tokens=snake_case_ , **snake_case_ )
        _lowerCAmelCase : Union[str, Any] = self.get_lang_id(snake_case_ )
        inputs["""forced_bos_token_id"""] = tgt_lang_id
        return inputs
def __UpperCamelCase ( self ):
self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase ( self ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
    def __UpperCamelCase ( self , snake_case_ ):
        # Reset the special tokens for the source language:
        # inputs take the form [src_lang_code] X [eos].
        _lowerCAmelCase : Optional[Any] = self.get_lang_token(snake_case_ )
        _lowerCAmelCase : List[Any] = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def __UpperCamelCase ( self , snake_case_ ):
        # Reset the special tokens for the target language:
        # labels take the form [tgt_lang_code] X [eos].
        _lowerCAmelCase : Any = self.get_lang_token(snake_case_ )
        _lowerCAmelCase : int = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
return self.lang_code_to_token[lang]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[str] = self.get_lang_token(snake_case_ )
return self.lang_token_to_id[lang_token]
def _UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
_lowerCAmelCase : Optional[Any] = sentencepiece.SentencePieceProcessor(**_lowerCamelCase )
spm.Load(str(_lowerCamelCase ) )
return spm
def _UpperCAmelCase ( _lowerCamelCase : str ) -> Union[Dict, List]:
with open(_lowerCamelCase , """r""" ) as f:
return json.load(_lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : str ) -> None:
with open(_lowerCamelCase , """w""" ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase , indent=2 )
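# Illustrative usage sketch (an addition, not part of the original module):
# drives the tokenizer above through the public `M2M100Tokenizer` API from
# `transformers`, using one of the pretrained ids listed at the top.
if __name__ == "__main__":
    from transformers import M2M100Tokenizer

    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
    tokenizer.src_lang = "en"  # prepends the __en__ language token to inputs
    encoded = tokenizer("Hello world", return_tensors="pt")
    # generation is steered toward the target language via forced_bos_token_id:
    print(encoded.input_ids, tokenizer.get_lang_id("fr"))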
| 309
| 1
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class a_ (_a ):
__lowerCAmelCase : List[str] = """encodec"""
def __init__( self , snake_case_=[1.5, 3.0, 6.0, 12.0, 24.0] , snake_case_=2_4_0_0_0 , snake_case_=1 , snake_case_=False , snake_case_=None , snake_case_=None , snake_case_=1_2_8 , snake_case_=3_2 , snake_case_=1 , snake_case_=[8, 5, 4, 2] , snake_case_="weight_norm" , snake_case_=7 , snake_case_=7 , snake_case_=3 , snake_case_=2 , snake_case_=True , snake_case_="reflect" , snake_case_=2 , snake_case_=2 , snake_case_=1.0 , snake_case_=1_0_2_4 , snake_case_=None , snake_case_=True , **snake_case_ , ):
_lowerCAmelCase : Optional[Any] = target_bandwidths
_lowerCAmelCase : Any = sampling_rate
_lowerCAmelCase : Tuple = audio_channels
_lowerCAmelCase : str = normalize
_lowerCAmelCase : Any = chunk_length_s
_lowerCAmelCase : List[Any] = overlap
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : Tuple = num_filters
_lowerCAmelCase : Dict = num_residual_layers
_lowerCAmelCase : Union[str, Any] = upsampling_ratios
_lowerCAmelCase : int = norm_type
_lowerCAmelCase : Tuple = kernel_size
_lowerCAmelCase : int = last_kernel_size
_lowerCAmelCase : str = residual_kernel_size
_lowerCAmelCase : Tuple = dilation_growth_rate
_lowerCAmelCase : Dict = use_causal_conv
_lowerCAmelCase : int = pad_mode
_lowerCAmelCase : Optional[Any] = compress
_lowerCAmelCase : str = num_lstm_layers
_lowerCAmelCase : List[Any] = trim_right_ratio
_lowerCAmelCase : Dict = codebook_size
_lowerCAmelCase : List[Any] = codebook_dim if codebook_dim is not None else hidden_size
_lowerCAmelCase : List[Any] = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' )
super().__init__(**snake_case_ )
@property
def __UpperCamelCase ( self ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __UpperCamelCase ( self ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
    @property
    def __UpperCamelCase ( self ):
        # frames per second produced by the encoder: the conv stack's total
        # stride (hop length) is the product of the upsampling ratios.
        _lowerCAmelCase : Union[str, Any] = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )
    @property
    def __UpperCamelCase ( self ):
        # number of residual-VQ codebooks needed for the largest target
        # bandwidth, with each quantizer contributing 10 bits per frame.
        return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0) )
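# Numeric sketch (an addition): with the defaults above -- 24 kHz audio and
# upsampling_ratios [8, 5, 4, 2] -- the hop length is 8 * 5 * 4 * 2 = 320
# samples, so frame_rate = ceil(24000 / 320) = 75 frames per second, and the
# 24 kbps top bandwidth needs 1000 * 24.0 // (75 * 10) = 32 quantizers.
#     from transformers import EncodecConfig
#     config = EncodecConfig()
#     print(config.frame_rate)  # 75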
| 309
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _UpperCAmelCase ( _lowerCamelCase : Callable , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ) -> np.ndarray:
    # Explicit (forward) Euler: y[k + 1] = y[k] + h * f(x_k, y[k]).
    _lowerCAmelCase : Union[str, Any] = int(np.ceil((x_end - xa) / step_size ) )
    _lowerCAmelCase : Tuple = np.zeros((n + 1,) )
    y[0] = ya  # initial condition
    _lowerCAmelCase : int = xa
    for k in range(_lowerCamelCase ):
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
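# Worked check (an addition; the routine above is `explicit_euler` upstream):
# integrating dy/dx = y from x = 0 with y(0) = 1 approximates e^x, so with
# step size 0.01 the endpoint y(1) comes out near 2.7048 -- forward Euler
# slightly under-estimates e ~= 2.7183.
#     y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#     print(y[-1])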
| 309
| 1
|
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class a_ (pl.LightningModule ):
def __init__( self , snake_case_ ):
super().__init__()
_lowerCAmelCase : List[Any] = model
_lowerCAmelCase : Dict = 2
_lowerCAmelCase : Union[str, Any] = nn.Linear(self.model.config.hidden_size , self.num_labels )
def __UpperCamelCase ( self ):
pass
def _UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : str ) -> List[Any]:
# load longformer model from model identifier
_lowerCAmelCase : str = LongformerModel.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : str = LightningModel(_lowerCamelCase )
_lowerCAmelCase : Any = torch.load(_lowerCamelCase , map_location=torch.device("""cpu""" ) )
lightning_model.load_state_dict(ckpt["""state_dict"""] )
# init longformer question answering model
_lowerCAmelCase : Union[str, Any] = LongformerForQuestionAnswering.from_pretrained(_lowerCamelCase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_lowerCamelCase )
print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCamelCase_ = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
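# Example invocation (an addition; the script filename and local paths are
# placeholders for illustration):
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./qa_checkpoint.ckpt \
#       --pytorch_dump_folder_path ./longformer-qa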
| 309
|
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def _UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] ) -> Union[str, Any]:
# ===== initialization =====
    _lowerCAmelCase : Tuple = Mock()
    # accept() must hand back a (connection, address) pair:
    sock.return_value.accept.return_value = conn, Mock()
    # the fake file yields one chunk of data, then signals EOF with None:
    _lowerCAmelCase : Optional[Any] = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _lowerCamelCase : next(f )
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=_lowerCamelCase )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 309
| 1
|
'''simple docstring'''
UpperCamelCase_ = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
UpperCamelCase_ = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
UpperCamelCase_ = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def _UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ) -> str:
    assert len(str(_lowerCamelCase ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 and 12"
    assert 1 <= day <= 31, "day should be between 1 and 31"
# Doomsday algorithm:
_lowerCAmelCase : Optional[Any] = year // 1_00
_lowerCAmelCase : Union[str, Any] = (5 * (century % 4) + 2) % 7
_lowerCAmelCase : Tuple = year % 1_00
_lowerCAmelCase : Any = centurian % 12
_lowerCAmelCase : Dict = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
    _lowerCAmelCase : List[str] = (
        DOOMSDAY_NOT_LEAP[month - 1]
        # leap years are divisible by 4, except century years not divisible by 400
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
_lowerCAmelCase : str = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
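# Worked check (an addition; the function above is `get_week_day` upstream):
# the doomsday of 2022 falls on a Monday (e.g. 4/4/2022), and January 3
# shares it in non-leap years, so January 1, 2022 lands two days earlier.
#     print(get_week_day(2022, 1, 1))  # Saturday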
| 309
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a_ :
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=3_0 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1_0 , snake_case_=0.02 , snake_case_=None , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : Tuple = image_size
_lowerCAmelCase : int = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : str = is_training
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : str = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : Any = type_sequence_label_size
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : Optional[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : List[Any] = (image_size // patch_size) ** 2
_lowerCAmelCase : Dict = num_patches + 1
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[Any] = ViTMSNModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Tuple = self.type_sequence_label_size
_lowerCAmelCase : int = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[int] = model(snake_case_ , labels=snake_case_ )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase : int = 1
_lowerCAmelCase : List[str] = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[int] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = config_and_inputs
_lowerCAmelCase : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ (_a , _a , unittest.TestCase ):
__lowerCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__lowerCAmelCase : Optional[int] = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase : Dict = False
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : Any = False
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = ViTMSNModelTester(self )
_lowerCAmelCase : int = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(snake_case_ )
_lowerCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def __UpperCamelCase ( self ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[int] = ViTMSNModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def _UpperCAmelCase ( ) -> Tuple:
_lowerCAmelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a_ (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self ):
torch.manual_seed(2 )
_lowerCAmelCase : Dict = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(snake_case_ )
_lowerCAmelCase : Dict = self.default_image_processor
_lowerCAmelCase : Any = prepare_img()
_lowerCAmelCase : List[str] = image_processor(images=snake_case_ , return_tensors="""pt""" ).to(snake_case_ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Dict = model(**snake_case_ )
# verify the logits
_lowerCAmelCase : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case_ )
_lowerCAmelCase : Tuple = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) )
| 309
| 1
|
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def _UpperCAmelCase ( ) -> None:
print("""Making key files...""" )
make_key_files("""rsa""" , 10_24 )
print("""Key files generation successful.""" )
def _UpperCAmelCase ( _lowerCamelCase : int ) -> tuple[tuple[int, int], tuple[int, int]]:
print("""Generating prime p...""" )
_lowerCAmelCase : Tuple = rabinMiller.generate_large_prime(_lowerCamelCase )
print("""Generating prime q...""" )
_lowerCAmelCase : List[str] = rabinMiller.generate_large_prime(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = p * q
print("""Generating e that is relatively prime to (p - 1) * (q - 1)...""" )
    while True:
        # e must be coprime with phi(n) = (p - 1) * (q - 1)
        _lowerCAmelCase : List[str] = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
        if cryptoMath.gcd(_lowerCamelCase , (p - 1) * (q - 1) ) == 1:
            break
print("""Calculating d that is mod inverse of e...""" )
_lowerCAmelCase : Tuple = cryptoMath.find_mod_inverse(_lowerCamelCase , (p - 1) * (q - 1) )
_lowerCAmelCase : Optional[Any] = (n, e)
_lowerCAmelCase : int = (n, d)
return (public_key, private_key)
def _UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : int ) -> None:
if os.path.exists(f'{name}_pubkey.txt' ) or os.path.exists(f'{name}_privkey.txt' ):
print("""\nWARNING:""" )
print(
f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
_lowerCAmelCase , _lowerCAmelCase : int = generate_key(_lowerCamelCase )
print(f'\nWriting public key to file {name}_pubkey.txt...' )
with open(f'{name}_pubkey.txt' , """w""" ) as out_file:
out_file.write(f'{key_size},{public_key[0]},{public_key[1]}' )
print(f'Writing private key to file {name}_privkey.txt...' )
with open(f'{name}_privkey.txt' , """w""" ) as out_file:
out_file.write(f'{key_size},{private_key[0]},{private_key[1]}' )
if __name__ == "__main__":
main()
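# Round-trip sanity check (an addition): d is constructed as e's modular
# inverse mod (p - 1) * (q - 1), so textbook RSA must satisfy
# pow(pow(m, e, n), d, n) == m for any message m < n.
#     public_key, private_key = generate_key(1024)
#     n, e = public_key
#     _, d = private_key
#     assert pow(pow(42, e, n), d, n) == 42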
| 309
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class a_ (_a ):
__lowerCAmelCase : List[Any] = """microsoft/speecht5_tts"""
__lowerCAmelCase : List[Any] = (
"""This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
"""text to read (in English) and returns a waveform object containing the sound."""
)
__lowerCAmelCase : List[str] = """text_reader"""
__lowerCAmelCase : Optional[Any] = SpeechTaProcessor
__lowerCAmelCase : str = SpeechTaForTextToSpeech
__lowerCAmelCase : int = SpeechTaHifiGan
__lowerCAmelCase : int = ["""text"""]
__lowerCAmelCase : int = ["""audio"""]
def __UpperCamelCase ( self ):
if self.post_processor is None:
_lowerCAmelCase : int = """microsoft/speecht5_hifigan"""
super().setup()
def __UpperCamelCase ( self , snake_case_ , snake_case_=None ):
_lowerCAmelCase : Tuple = self.pre_processor(text=snake_case_ , return_tensors="""pt""" , truncation=snake_case_ )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
            # default to a fixed x-vector from the CMU ARCTIC speaker set
            _lowerCAmelCase : List[str] = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
            _lowerCAmelCase : Any = torch.tensor(embeddings_dataset[7_3_0_5]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __UpperCamelCase ( self , snake_case_ ):
with torch.no_grad():
return self.model.generate_speech(**snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
with torch.no_grad():
return self.post_processor(snake_case_ ).cpu().detach()
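# Illustrative usage sketch (an addition): the tool above is normally loaded
# through the agents API; `load_tool` is public in `transformers`, though the
# "text-to-speech" task id is an assumption here.
#     from transformers import load_tool
#     reader = load_tool("text-to-speech")
#     audio = reader("Hello, world")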
| 309
| 1
|
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class a_ (nn.Module ):
__lowerCAmelCase : int
__lowerCAmelCase : jnp.dtype = jnp.floataa
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , snake_case_ ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = hidden_states.shape
_lowerCAmelCase : str = jax.image.resize(
snake_case_ , shape=(batch, height * 2, width * 2, channels) , method="""nearest""" , )
_lowerCAmelCase : int = self.conv(snake_case_ )
return hidden_states
class a_ (nn.Module ):
__lowerCAmelCase : int
__lowerCAmelCase : jnp.dtype = jnp.floataa
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , snake_case_ ):
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
_lowerCAmelCase : List[str] = self.conv(snake_case_ )
return hidden_states
class a_ (nn.Module ):
__lowerCAmelCase : int
__lowerCAmelCase : int = None
__lowerCAmelCase : float = 0.0
__lowerCAmelCase : bool = None
__lowerCAmelCase : jnp.dtype = jnp.floataa
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.in_channels if self.out_channels is None else self.out_channels
_lowerCAmelCase : Union[str, Any] = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 )
_lowerCAmelCase : str = nn.Conv(
snake_case_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_lowerCAmelCase : int = nn.Dense(snake_case_ , dtype=self.dtype )
_lowerCAmelCase : Any = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 )
_lowerCAmelCase : int = nn.Dropout(self.dropout_prob )
_lowerCAmelCase : Optional[Any] = nn.Conv(
snake_case_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_lowerCAmelCase : Dict = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
_lowerCAmelCase : Tuple = None
if use_nin_shortcut:
_lowerCAmelCase : Any = nn.Conv(
snake_case_ , kernel_size=(1, 1) , strides=(1, 1) , padding="""VALID""" , dtype=self.dtype , )
def __call__( self , snake_case_ , snake_case_ , snake_case_=True ):
_lowerCAmelCase : Optional[int] = hidden_states
_lowerCAmelCase : Tuple = self.norma(snake_case_ )
_lowerCAmelCase : int = nn.swish(snake_case_ )
_lowerCAmelCase : int = self.conva(snake_case_ )
_lowerCAmelCase : Optional[int] = self.time_emb_proj(nn.swish(snake_case_ ) )
_lowerCAmelCase : Dict = jnp.expand_dims(jnp.expand_dims(snake_case_ , 1 ) , 1 )
_lowerCAmelCase : List[str] = hidden_states + temb
_lowerCAmelCase : str = self.norma(snake_case_ )
_lowerCAmelCase : Union[str, Any] = nn.swish(snake_case_ )
_lowerCAmelCase : Optional[Any] = self.dropout(snake_case_ , snake_case_ )
_lowerCAmelCase : str = self.conva(snake_case_ )
if self.conv_shortcut is not None:
_lowerCAmelCase : List[str] = self.conv_shortcut(snake_case_ )
return hidden_states + residual
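# Minimal init/apply sketch (an addition): the three modules above correspond
# to diffusers' FlaxUpsample2D, FlaxDownsample2D and FlaxResnetBlock2D. The
# upsampler doubles the spatial dims by nearest-neighbour resize, then applies
# a 3x3 convolution:
#     import jax
#     import jax.numpy as jnp
#     upsample = FlaxUpsample2D(out_channels=64)
#     x = jnp.zeros((1, 32, 32, 16))  # NHWC, as Flax convolutions expect
#     params = upsample.init(jax.random.PRNGKey(0), x)
#     y = upsample.apply(params, x)   # -> shape (1, 64, 64, 64)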
| 309
|
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _UpperCAmelCase ( ) -> Tuple:
_lowerCAmelCase : List[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
_lowerCAmelCase : int = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert("""RGB""" )
return image
def _UpperCAmelCase ( _lowerCamelCase : Any ) -> Dict:
_lowerCAmelCase : str = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def _UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] ) -> Optional[Any]:
_lowerCAmelCase : str = dct.pop(_lowerCamelCase )
_lowerCAmelCase : str = val
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple ) -> Tuple:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_lowerCAmelCase : Tuple = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' )
_lowerCAmelCase : Optional[Any] = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
_lowerCAmelCase : int = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase , requires_grad=_lowerCamelCase ), v_bias) )
_lowerCAmelCase : str = qkv_bias
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] ) -> List[Any]:
_lowerCAmelCase : str = 3_64 if """coco""" in model_name else 2_24
_lowerCAmelCase : str = BlipaVisionConfig(image_size=_lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
_lowerCAmelCase : int = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=_lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
_lowerCAmelCase : Union[str, Any] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=_lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
_lowerCAmelCase : Optional[int] = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_lowerCAmelCase : str = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
_lowerCAmelCase : Dict = BlipaConfig(vision_config=_lowerCamelCase , text_config=_lowerCamelCase )
return config, image_size
@torch.no_grad()
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : List[Any]=None , _lowerCamelCase : int=False ) -> List[str]:
_lowerCAmelCase : int = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
_lowerCAmelCase : List[Any] = tokenizer("""\n""" , add_special_tokens=_lowerCamelCase ).input_ids[0]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = BlipaForConditionalGeneration(_lowerCamelCase ).eval()
_lowerCAmelCase : Union[str, Any] = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
_lowerCAmelCase , _lowerCAmelCase : List[str] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_lowerCAmelCase : Dict = """cuda""" if torch.cuda.is_available() else """cpu"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = load_model_and_preprocess(
name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
_lowerCAmelCase : List[Any] = original_model.state_dict()
_lowerCAmelCase : Optional[int] = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_lowerCAmelCase : Tuple = state_dict.pop(_lowerCamelCase )
if key.startswith("""Qformer.bert""" ):
_lowerCAmelCase : List[Any] = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
_lowerCAmelCase : Optional[int] = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
_lowerCAmelCase : Dict = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
_lowerCAmelCase : Tuple = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
_lowerCAmelCase : List[Any] = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
_lowerCAmelCase : int = key.replace("""t5""" , """language""" )
_lowerCAmelCase : Tuple = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert len(_lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_lowerCAmelCase : Union[str, Any] = load_demo_image()
_lowerCAmelCase : Optional[int] = vis_processors["""eval"""](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
_lowerCAmelCase : List[str] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
# create processor
_lowerCAmelCase : Optional[int] = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
_lowerCAmelCase : Tuple = BlipaProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
_lowerCAmelCase : Any = processor(images=_lowerCamelCase , return_tensors="""pt""" ).pixel_values.to(_lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
_lowerCAmelCase : Optional[Any] = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
_lowerCAmelCase : Optional[Any] = hf_model(_lowerCamelCase , _lowerCamelCase ).logits
else:
_lowerCAmelCase : List[Any] = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
_lowerCAmelCase : Tuple = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
_lowerCAmelCase : Dict = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_lowerCAmelCase : Any = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_lowerCAmelCase : List[Any] = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase )
else:
# cast to same type
_lowerCAmelCase : Union[str, Any] = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_lowerCAmelCase : Optional[int] = """"""
_lowerCAmelCase : Union[str, Any] = tokenizer(_lowerCamelCase , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
_lowerCAmelCase : List[Any] = original_model.generate({"""image""": original_pixel_values} )
_lowerCAmelCase : Dict = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , _lowerCamelCase )
_lowerCAmelCase : int = input_ids.shape[1]
_lowerCAmelCase : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
_lowerCAmelCase : List[str] = [text.strip() for text in output_text]
print("""HF generation:""" , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'nielsr/{model_name}' )
hf_model.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
UpperCamelCase_ = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
UpperCamelCase_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
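# Example invocation (an addition; the script filename is assumed and the
# output directory is a placeholder):
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b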
| 309
| 1
|
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class a_ (tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = 1.0 , snake_case_ = None , ):
super().__init__()
_lowerCAmelCase : List[Any] = initial_learning_rate
_lowerCAmelCase : Optional[int] = warmup_steps
_lowerCAmelCase : Tuple = power
_lowerCAmelCase : Optional[Any] = decay_schedule_fn
_lowerCAmelCase : List[Any] = name
def __call__( self , snake_case_ ):
with tf.name_scope(self.name or """WarmUp""" ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
_lowerCAmelCase : List[str] = tf.cast(snake_case_ , tf.floataa )
_lowerCAmelCase : Optional[int] = tf.cast(self.warmup_steps , tf.floataa )
_lowerCAmelCase : Tuple = global_step_float / warmup_steps_float
_lowerCAmelCase : Union[str, Any] = self.initial_learning_rate * tf.math.pow(snake_case_ , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=snake_case_ , )
def __UpperCamelCase ( self ):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def _UpperCAmelCase ( _lowerCamelCase : float , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : float = 0.0 , _lowerCamelCase : float = 0.9 , _lowerCamelCase : float = 0.999 , _lowerCamelCase : float = 1e-8 , _lowerCamelCase : Optional[float] = None , _lowerCamelCase : Optional[float] = None , _lowerCamelCase : float = 0.0 , _lowerCamelCase : float = 1.0 , _lowerCamelCase : Optional[List[str]] = None , ) -> List[str]:
_lowerCAmelCase : Optional[Any] = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=_lowerCamelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=_lowerCamelCase , )
if num_warmup_steps:
_lowerCAmelCase : Tuple = WarmUp(
initial_learning_rate=_lowerCamelCase , decay_schedule_fn=_lowerCamelCase , warmup_steps=_lowerCamelCase , )
if weight_decay_rate > 0.0:
        # beta_1 / beta_2 are Adam's first- and second-moment decay rates.
        _lowerCAmelCase : int = AdamWeightDecay(
            learning_rate=_lowerCamelCase , weight_decay_rate=_lowerCamelCase , beta_1=_lowerCamelCase , beta_2=_lowerCamelCase , epsilon=_lowerCamelCase , clipnorm=_lowerCamelCase , global_clipnorm=_lowerCamelCase , exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] , include_in_weight_decay=_lowerCamelCase , )
    else:
        _lowerCAmelCase : int = tf.keras.optimizers.Adam(
            learning_rate=_lowerCamelCase , beta_1=_lowerCamelCase , beta_2=_lowerCamelCase , epsilon=_lowerCamelCase , clipnorm=_lowerCamelCase , global_clipnorm=_lowerCamelCase , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
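# Usage sketch (an addition): the helper above is transformers'
# `create_optimizer`; a typical fine-tuning setup looks like
#     optimizer, lr_schedule = create_optimizer(
#         init_lr=5e-5,
#         num_train_steps=10_000,
#         num_warmup_steps=1_000,
#         weight_decay_rate=0.01,
#     )
#     model.compile(optimizer=optimizer)  # `model` is any tf.keras model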
class a_ (_a ):
def __init__( self , snake_case_ = 0.001 , snake_case_ = 0.9 , snake_case_ = 0.999 , snake_case_ = 1E-7 , snake_case_ = False , snake_case_ = 0.0 , snake_case_ = None , snake_case_ = None , snake_case_ = "AdamWeightDecay" , **snake_case_ , ):
super().__init__(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
_lowerCAmelCase : List[str] = weight_decay_rate
_lowerCAmelCase : Union[str, Any] = include_in_weight_decay
_lowerCAmelCase : Tuple = exclude_from_weight_decay
@classmethod
def __UpperCamelCase ( cls , snake_case_ ):
_lowerCAmelCase : Dict = {"""WarmUp""": WarmUp}
return super(snake_case_ , cls ).from_config(snake_case_ , custom_objects=snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
super(snake_case_ , self )._prepare_local(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Tuple = tf.constant(
self.weight_decay_rate , name="""adam_weight_decay_rate""" )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[str] = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] , use_locking=self._use_locking , )
return tf.no_op()
    def __UpperCamelCase ( self , snake_case_ , snake_case_=None , **snake_case_ ):
        # split the (gradient, variable) pairs, then re-zip them for Adam
        _lowerCAmelCase , _lowerCAmelCase : str = list(zip(*snake_case_ ) )
        return super(snake_case_ , self ).apply_gradients(zip(grads , tvars ) , name=snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
_lowerCAmelCase : str = apply_state or {}
_lowerCAmelCase : Dict = apply_state.get((var_device, var_dtype) )
if coefficients is None:
_lowerCAmelCase : Tuple = self._fallback_apply_state(snake_case_ , snake_case_ )
_lowerCAmelCase : str = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_=None ):
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self._get_lr(var.device , var.dtype.base_dtype , snake_case_ )
_lowerCAmelCase : List[str] = self._decay_weights_op(snake_case_ , snake_case_ , snake_case_ )
with tf.control_dependencies([decay] ):
return super(snake_case_ , self )._resource_apply_dense(snake_case_ , snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ):
_lowerCAmelCase , _lowerCAmelCase : Any = self._get_lr(var.device , var.dtype.base_dtype , snake_case_ )
_lowerCAmelCase : int = self._decay_weights_op(snake_case_ , snake_case_ , snake_case_ )
with tf.control_dependencies([decay] ):
return super(snake_case_ , self )._resource_apply_sparse(snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = super().get_config()
config.update({"""weight_decay_rate""": self.weight_decay_rate} )
return config
def __UpperCamelCase ( self , snake_case_ ):
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(snake_case_ , snake_case_ ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(snake_case_ , snake_case_ ) is not None:
return False
return True
class a_ (_a ):
def __init__( self ):
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Dict = None
@property
def __UpperCamelCase ( self ):
if self._accum_steps is None:
_lowerCAmelCase : Dict = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=snake_case_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def __UpperCamelCase ( self ):
if not self._gradients:
raise ValueError("""The accumulator should be called first to initialize the gradients""" )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , snake_case_ ):
if not self._gradients:
_lowerCAmelCase : Optional[int] = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(snake_case_ ) , trainable=snake_case_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(snake_case_ ) != len(self._gradients ):
raise ValueError(f'Expected {len(self._gradients )} gradients, but got {len(snake_case_ )}' )
for accum_gradient, gradient in zip(self._gradients , snake_case_ ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(snake_case_ )
self._accum_steps.assign_add(1 )
def __UpperCamelCase ( self ):
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(snake_case_ ) )
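# Accumulation sketch (an addition): the final class above is transformers'
# GradientAccumulator. Feed it per-batch gradients, apply the running sum
# every `accumulation_steps` batches, then reset; `compute_grads` stands in
# for a hypothetical tf.GradientTape pass.
#     accumulator = GradientAccumulator()
#     for step, batch in enumerate(batches):
#         accumulator(compute_grads(batch))
#         if (step + 1) % accumulation_steps == 0:
#             optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#             accumulator.reset()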
| 309
|
'''simple docstring'''
import argparse
import os
import re
UpperCamelCase_ = """src/diffusers"""
# Pattern that looks at the indentation in a line.
UpperCamelCase_ = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCamelCase_ = re.compile(r"""\[([^\]]+)\]""")
def _UpperCAmelCase ( _lowerCamelCase : List[Any] ) -> str:
_lowerCAmelCase : Dict = _re_indent.search(_lowerCamelCase )
return "" if search is None else search.groups()[0]
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str]="" , _lowerCamelCase : str=None , _lowerCamelCase : List[Any]=None ) -> str:
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Tuple = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(_lowerCamelCase ):
index += 1
_lowerCAmelCase : List[Any] = ["""\n""".join(lines[:index] )]
else:
_lowerCAmelCase : List[str] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase : Union[str, Any] = [lines[index]]
index += 1
while index < len(_lowerCamelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_lowerCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(_lowerCamelCase ) )
if index < len(_lowerCamelCase ) - 1:
_lowerCAmelCase : Union[str, Any] = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : Dict = []
else:
blocks.append("""\n""".join(_lowerCamelCase ) )
_lowerCAmelCase : Tuple = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_lowerCamelCase ) > 0:
blocks.append("""\n""".join(_lowerCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCamelCase ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] ) -> Any:
def _inner(_lowerCamelCase : Any ):
return key(_lowerCamelCase ).lower().replace("""_""" , """""" )
return _inner
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple=None ) -> Union[str, Any]:
# If no key is provided, we use a noop.
    def noop(_lowerCamelCase : List[Any] ):
        return _lowerCamelCase
if key is None:
_lowerCAmelCase : Union[str, Any] = noop
# Constants are all uppercase, they go first.
_lowerCAmelCase : Any = [obj for obj in objects if key(_lowerCamelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowerCAmelCase : Union[str, Any] = [obj for obj in objects if key(_lowerCamelCase )[0].isupper() and not key(_lowerCamelCase ).isupper()]
# Functions begin with a lowercase, they go last.
_lowerCAmelCase : Optional[Any] = [obj for obj in objects if not key(_lowerCamelCase )[0].isupper()]
_lowerCAmelCase : List[str] = ignore_underscore(_lowerCamelCase )
return sorted(_lowerCamelCase , key=_lowerCamelCase ) + sorted(_lowerCamelCase , key=_lowerCamelCase ) + sorted(_lowerCamelCase , key=_lowerCamelCase )
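# Ordering example (an addition; the function above is `sort_objects`
# upstream): constants sort first, then classes, then functions, each bucket
# alphabetized with underscores ignored.
#     sort_objects(["load_tool", "AutoModel", "CONFIG_NAME", "Pipeline"])
#     # -> ["CONFIG_NAME", "AutoModel", "Pipeline", "load_tool"]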
def _UpperCAmelCase ( _lowerCamelCase : str ) -> str:
# This inner function sort imports between [ ].
def _replace(_lowerCamelCase : Union[str, Any] ):
_lowerCAmelCase : Optional[Any] = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
_lowerCAmelCase : List[str] = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : int = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(_lowerCamelCase )] ) + "]"
_lowerCAmelCase : Optional[int] = import_statement.split("""\n""" )
if len(_lowerCamelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowerCAmelCase : Dict = 2 if lines[1].strip() == """[""" else 1
_lowerCAmelCase : Tuple = [(i, _re_strip_line.search(_lowerCamelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        _lowerCAmelCase : Tuple = sort_objects(_lowerCamelCase , key=lambda _lowerCamelCase : _lowerCamelCase[1] )
_lowerCAmelCase : Optional[Any] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_lowerCamelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowerCAmelCase : str = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowerCAmelCase : Tuple = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : Dict = keys[:-1]
_lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + """, """.join([f'"{k}"' for k in sort_objects(_lowerCamelCase )] )
return "\n".join(_lowerCamelCase )
else:
# Finally we have to deal with imports fitting on one line
_lowerCAmelCase : Dict = _re_bracket_content.sub(_replace , _lowerCamelCase )
return import_statement
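# Illustrative example (assumption, not part of the original script):
#   sort_objects_in_import('_import_structure["models.bert"].extend(["BertModel", "BERT_CONSTANT"])')
# returns the same statement with ["BERT_CONSTANT", "BertModel"], since constants
# sort before classes inside the brackets.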
def sort_imports(file, check_only=True):
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    # PATH_TO_TRANSFORMERS is assumed to be defined earlier in this script
    # (in the upstream utility it points at "src/transformers").
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 309
| 1
|
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
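# For example (illustrative), with_config=True yields entries such as
#   {"testcase_name": "wikipedia/20220301.de", "dataset": "wikipedia", "config_name": "20220301.de"},
# which `parameterized.named_parameters` below unpacks into one named test case each.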
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 309
|
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding="longest",
        return_tensors=None,
        truncation=True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
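# Illustrative usage (assumption, not part of the original module):
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   inputs = tokenizer(["who wrote hamlet?"], return_tensors="pt")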
| 309
| 1
|
'''simple docstring'''
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array, output_array):
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(self.input_array.shape[1], 4)

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self):
        # layer_between_input_and_first_hidden_layer is the layer connecting
        # the input nodes with the first hidden layer nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self):
        # Gradient of the loss (squared error) with respect to each weight matrix,
        # propagated backwards from the output layer to the input layer.
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output, iterations, give_loss):
        for iteration in range(1, iterations + 1):
            # Update the prediction that back_propagation reads.
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr):
        # Input values for which the predictions are to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    # Expects the *activation* value sigmoid(x), not x itself.
    return (value) * (1 - (value))
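# Quick sanity check (illustrative): sigmoid(0) == 0.5 and
# sigmoid_derivative(0.5) == 0.25, the maximum slope of the sigmoid.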
def example() -> int:
    # Input values (here: all 3-bit binary patterns).
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
| 309
|
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
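# Worked example (illustrative): for [8, 3, 2, 7, 4, 6, 8], min_val is 2 and
# size is 7, so holes == [1, 1, 1, 0, 1, 1, 2] (counts for the values 2..8),
# which is then unrolled back into the list as [2, 3, 4, 6, 7, 8, 8].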
def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join([str(x) for x in a]))
if __name__ == "__main__":
main()
| 309
| 1
|
'''simple docstring'''
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
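# Spot check (illustrative): gaussian(0) == 1 / sqrt(2 * pi) ≈ 0.3989422804014327,
# the peak of the standard normal density.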
if __name__ == "__main__":
import doctest
doctest.testmod()
| 309
|
'''simple docstring'''
def solution(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
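# Worked example (illustrative): for n = 10, the square of the sum is 55**2 == 3025
# and the sum of the squares is 385, so solution(10) == 2640.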
if __name__ == "__main__":
print(F'{solution() = }')
| 309
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}'
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
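# Matching rules (illustrative): a key "encoder.*" ignores any name starting with
# "encoder.", while "quantizer.*.embed" ignores any name containing both "quantizer"
# and "embed" (a wildcard in the middle of the key).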
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
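# Illustrative invocation (paths are placeholders, not from the original script):
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_hf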
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
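# With this pattern, importing the package stays cheap: names listed in
# `_import_structure` are only resolved (and torch only imported) on first
# attribute access, while the TYPE_CHECKING branch keeps static analyzers accurate.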
| 309
| 1
|
'''simple docstring'''
def nand_gate(input_1: int, input_2: int) -> int:
    # NAND is 0 only when both inputs are 1, i.e. 1 whenever either input is 0.
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 309
| 1
|
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli", model_args.language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None
            )
        else:
            train_dataset = load_dataset(
                "xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli", model_args.language, split="test", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task="xnli", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"], examples["hypothesis"], padding=padding, max_length=data_args.max_seq_length, truncation=True,
        )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset",
            )
    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
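# Illustrative invocation (arguments are placeholders, not from the original script):
#   python run_xnli.py --model_name_or_path bert-base-multilingual-cased --language de \
#       --do_train --do_eval --output_dir /tmp/xnli_de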
| 309
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    # Apply the rectified linear unit element-wise: negative values become 0.
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 309
| 1
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        config = BitsAndBytesConfig()
        config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
def __UpperCamelCase ( self ):
with self.assertRaises(snake_case_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = BitsAndBytesConfig()
with self.assertRaises(snake_case_ ):
_lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=snake_case_ , load_in_abit=snake_case_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def __UpperCamelCase ( self ):
with self.assertRaises(ValueError ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(ValueError ):
# Tries with a `dtype`
self.model_abit.to(torch.floataa )
with self.assertRaises(ValueError ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(ValueError ):
# Tries casting with `float`
self.model_abit.float()
with self.assertRaises(ValueError ):
# Tries casting with `half`
self.model_abit.half()
# Test if we did not break anything
encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" )
self.model_fpaa = self.model_fpaa.to(torch.floataa )
_ = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
_ = self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
_ = self.model_fpaa.half()
# Check this does not throw an error
_ = self.model_fpaa.float()
def __UpperCamelCase ( self ):
model = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=True , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class a_ (unittest.TestCase ):
@classmethod
def __UpperCamelCase ( cls ):
cls.model_name = """t5-small"""
cls.dense_act_model_name = """google/flan-t5-small"""  # flan-t5 uses dense-act instead of dense-relu-dense
cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name )
cls.input_text = """Translate in German: Hello, my dog is cute"""
def __UpperCamelCase ( self ):
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
from transformers import TaForConditionalGeneration
modules = TaForConditionalGeneration._keep_in_fpaa_modules
TaForConditionalGeneration._keep_in_fpaa_modules = None
# test with `t5-small`
model = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=True , device_map="""auto""" )
encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
_ = model.generate(**encoded_input )
# test with `flan-t5-small`
model = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=True , device_map="""auto""" )
encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
_ = model.generate(**encoded_input )
TaForConditionalGeneration._keep_in_fpaa_modules = modules
def __UpperCamelCase ( self ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
model = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=True , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
_ = model.generate(**encoded_input )
# test with `flan-t5-small`
model = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=True , device_map="""auto""" )
encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
_ = model.generate(**encoded_input )
class a_ (_a ):
def __UpperCamelCase ( self ):
super().setUp()
# model_name
self.model_name = """bigscience/bloom-560m"""
self.seq_to_seq_name = """t5-small"""
# Different types of model
self.base_model = AutoModel.from_pretrained(self.model_name , load_in_abit=True , device_map="""auto""" )
# Sequence classification model
self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=True , device_map="""auto""" )
# CausalLM model
self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True , device_map="""auto""" )
# Seq2seq model
self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=True , device_map="""auto""" )
def __UpperCamelCase ( self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class a_ (_a ):
def __UpperCamelCase ( self ):
super().setUp()
def __UpperCamelCase ( self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
self.pipe = pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
pipeline_output = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class a_ (_a ):
def __UpperCamelCase ( self ):
super().setUp()
def __UpperCamelCase ( self ):
model_parallel = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=True , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
encoded_input = self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
output_parallel = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )
class a_ (_a ):
def __UpperCamelCase ( self ):
self.model_name = """facebook/opt-350m"""
super().setUp()
def __UpperCamelCase ( self ):
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
model = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
param.requires_grad = False  # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
param.data = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(module ) ):
module.q_proj = LoRALayer(module.q_proj , rank=1_6 )
module.k_proj = LoRALayer(module.k_proj , rank=1_6 )
module.v_proj = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
batch = self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
out = model.forward(**batch )
out.logits.norm().backward()
for module in model.modules():
if isinstance(module , LoRALayer ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(module , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
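# The `LoRALayer` used above is not defined in this excerpt. Below is a minimal
# sketch of what such a test helper might look like; the init scheme and the
# attribute layout are assumptions, chosen only to match the usage above
# (`LoRALayer(module.q_proj, rank=16)` and `module.adapter[1].weight.grad`).
import torch
from torch import nn

class LoRALayer(nn.Module):
    """Wraps a frozen linear layer and adds a small trainable low-rank adapter."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module  # the original (frozen) nn.Linear
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        nn.init.normal_(self.adapter[0].weight, std=0.02)
        nn.init.zeros_(self.adapter[1].weight)  # adapter starts as a no-op
        self.adapter.to(module.weight.device)

    def forward(self, x, *args, **kwargs):
        # frozen base output plus the trainable low-rank correction
        return self.module(x, *args, **kwargs) + self.adapter(x)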
class a_ (_a ):
__lowerCAmelCase : Any = """gpt2-xl"""
__lowerCAmelCase : Optional[Any] = 3.3191854854152187
| 309
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class a_ (_a ):
def __init__( self , *snake_case_ , **snake_case_ ):
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" , snake_case_ , )
super().__init__(*snake_case_ , **snake_case_ )
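# Migration is a drop-in rename; a minimal sketch (the checkpoint id is only
# illustrative):
#
#     from transformers import DonutImageProcessor
#     processor = DonutImageProcessor.from_pretrained("naver-clova-ix/donut-base")
#
# instead of instantiating the deprecated DonutFeatureExtractor.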
| 309
| 1
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser( subparsers=None ):
if subparsers is not None:
parser = subparsers.add_parser("""env""" )
else:
parser = argparse.ArgumentParser("""Accelerate env command""" )
parser.add_argument(
"""--config_file""" , default=None , help="""The config file to use for the default values in the launching script.""" )
if subparsers is not None:
parser.set_defaults(func=env_command )
return parser
def env_command( args ):
pt_version = torch.__version__
pt_cuda_available = torch.cuda.is_available()
pt_xpu_available = is_xpu_available()
pt_npu_available = is_npu_available()
accelerate_config = """Not found"""
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(default_config_file ):
accelerate_config = load_config_from_file(args.config_file ).to_dict()
info = {
"""`Accelerate` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Numpy version""": np.__version__,
"""PyTorch version (GPU?)""": f'{pt_version} ({pt_cuda_available})',
"""PyTorch XPU available""": str(pt_xpu_available ),
"""PyTorch NPU available""": str(pt_npu_available ),
"""System RAM""": f'{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB',
}
if pt_cuda_available:
info["""GPU type"""] = torch.cuda.get_device_name()
print("""\nCopy-and-paste the text below in your GitHub issue\n""" )
print("""\n""".join([f'- {prop}: {val}' for prop, val in info.items()] ) )
print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""" )
accelerate_config_str = (
"""\n""".join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()] )
if isinstance(accelerate_config , dict )
else f'\t{accelerate_config}'
)
print(accelerate_config_str )
info["""`Accelerate` configs"""] = accelerate_config
return info
def main() -> int:
parser = env_command_parser()
args = parser.parse_args()
env_command(args )
return 0
if __name__ == "__main__":
raise SystemExit(main())
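# A minimal in-process usage sketch (the `accelerate env` CLI entry point
# ultimately does the same thing):
#
#     parser = env_command_parser()
#     env_command(parser.parse_args([]))  # empty argv -> probe default config file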
| 309
|
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class a_ (_a ):
__lowerCAmelCase : Dict = (DPMSolverSDEScheduler,)
__lowerCAmelCase : Dict = 1_0
def __UpperCamelCase ( self , **snake_case_ ):
config = {
"""num_train_timesteps""": 1_1_0_0,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""noise_sampler_seed""": 0,
}
config.update(**snake_case_ )
return config
def __UpperCamelCase ( self ):
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def __UpperCamelCase ( self ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=snake_case_ , beta_end=snake_case_ )
def __UpperCamelCase ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=snake_case_ )
def __UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case_ )
def __UpperCamelCase ( self ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(self.num_inference_steps )
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device )
for i, t in enumerate(scheduler.timesteps ):
sample = scheduler.scale_model_input(sample , t )
model_output = model(sample , t )
output = scheduler.step(model_output , t , sample )
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1E-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def __UpperCamelCase ( self ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""" )
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(self.num_inference_steps )
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device )
for i, t in enumerate(scheduler.timesteps ):
sample = scheduler.scale_model_input(sample , t )
model_output = model(sample , t )
output = scheduler.step(model_output , t , sample )
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1E-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1E-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1E-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1E-3
def __UpperCamelCase ( self ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
model = self.dummy_model()
sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample , t )
model_output = model(sample , t )
output = scheduler.step(model_output , t , sample )
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1E-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def __UpperCamelCase ( self ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config , use_karras_sigmas=True )
scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
model = self.dummy_model()
sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
sample = sample.to(torch_device )
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample , t )
model_output = model(sample , t )
output = scheduler.step(model_output , t , sample )
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
| 309
| 1
|
'''simple docstring'''
def sum_of_divisors( input_num : int ) -> int:
if not isinstance(input_num , int ):
raise ValueError("""Input must be an integer""" )
if input_num <= 0:
raise ValueError("""Input must be positive""" )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
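# Example usage, as a rough sanity check (28 is a perfect number, so the sum of
# its proper divisors 1 + 2 + 4 + 7 + 14 equals 28):
#
#     assert sum_of_divisors(28) == 28
#     assert sum_of_divisors(12) == 1 + 2 + 3 + 4 + 6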
| 309
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"""vocab_file""": """vocab.txt"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
UpperCamelCase_ = {
"""YituTech/conv-bert-base""": 5_12,
"""YituTech/conv-bert-medium-small""": 5_12,
"""YituTech/conv-bert-small""": 5_12,
}
UpperCamelCase_ = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class a_ (_a ):
__lowerCAmelCase : Any = VOCAB_FILES_NAMES
__lowerCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Optional[int] = ConvBertTokenizer
def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
super().__init__(
vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
):
normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
normalizer_state["""lowercase"""] = do_lower_case
normalizer_state["""strip_accents"""] = strip_accents
normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
self.do_lower_case = do_lower_case
def __UpperCamelCase ( self , token_ids_a , token_ids_b=None ):
output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_b:
output += token_ids_b + [self.sep_token_id]
return output
def __UpperCamelCase ( self , token_ids_a , token_ids_b = None ):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def __UpperCamelCase ( self , save_directory , filename_prefix = None ):
files = self._tokenizer.model.save(save_directory , name=filename_prefix )
return tuple(files )
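# A short usage sketch for the pair-encoding helpers above (the class is named
# `a_` in this dump, `ConvBertTokenizerFast` upstream; treat exact ids as
# illustrative):
#
#     tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#     enc = tok("first segment", "second segment")
#     # enc["token_type_ids"] marks [CLS] A [SEP] with 0s and B [SEP] with 1s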
| 309
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester :
def __init__( self , parent , batch_size=1_2 , seq_length=7 , is_training=True , use_input_mask=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , projection_dim=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , dropout=0.1 , attention_dropout=0.1 , max_position_embeddings=5_1_2 , initializer_range=0.02 , bos_token_id=0 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = scope
self.bos_token_id = bos_token_id
def __UpperCamelCase ( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
input_mask = input_mask.numpy()
batch_size , seq_length = input_mask.shape
start_indices = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(start_indices ):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
config = self.get_config()
return config, input_ids, tf.convert_to_tensor(input_mask )
def __UpperCamelCase ( self ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def __UpperCamelCase ( self , config , input_ids , input_mask ):
model = TFBlipTextModel(config=config )
result = model(input_ids , attention_mask=input_mask , training=False )
result = model(input_ids , training=False )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCamelCase ( self ):
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , input_mask = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class a_ (_a , unittest.TestCase ):
__lowerCAmelCase : int = (TFBlipTextModel,) if is_tf_available() else ()
__lowerCAmelCase : Any = False
__lowerCAmelCase : Optional[int] = False
__lowerCAmelCase : str = False
def __UpperCamelCase ( self ):
self.model_tester = BlipTextModelTester(self )
self.config_tester = ConfigTester(self , config_class=BlipTextConfig , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCamelCase ( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Blip does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def __UpperCamelCase ( self ):
pass
@slow
def __UpperCamelCase ( self ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFBlipTextModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def __UpperCamelCase ( self , allow_missing_keys=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys )
| 309
|
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch :
def __init__( self ):
self.img = """"""
self.original_image = """"""
self.last_list = []
self.rem = 0
self.L = 2_5_6
self.sk = 0
self.k = 0
self.number_of_rows = 0
self.number_of_cols = 0
def stretch( self , input_image ):
self.img = cva.imread(input_image , 0 )
self.original_image = copy.deepcopy(self.img )
x , _ , _ = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="""x""" )
self.k = np.sum(x )
for i in range(len(x ) ):
prk = x[i] / self.k
self.sk += prk
last = (self.L - 1) * self.sk
if self.rem != 0:
self.rem = int(last % last )
last = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(last )
self.number_of_rows = int(np.ma.count(self.img ) / self.img[1].size )
self.number_of_cols = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
num = self.img[j][i]
if num != self.last_list[num]:
self.img[j][i] = self.last_list[num]
cva.imwrite("""output_data/output.jpg""" , self.img )
def plot_histogram( self ):
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def show_image( self ):
cva.imshow("""Output-Image""" , self.img )
cva.imshow("""Input-Image""" , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
file_path = os.path.join(os.path.basename(__file__), """image_data/input.jpg""")
stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
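# The lookup table built in `stretch` is essentially histogram equalization:
# the cumulative distribution sk = sum(p(r_k)) is scaled by (L - 1) to give the
# new intensity for each gray level. A minimal numpy-only sketch of that idea
# (self-contained and separate from the class above):
import numpy as np

def equalize(img: np.ndarray, levels: int = 256) -> np.ndarray:
    hist, _ = np.histogram(img.ravel(), bins=levels, range=(0, levels))
    cdf = hist.cumsum() / hist.sum()                      # running sum of p(r_k)
    lut = np.round((levels - 1) * cdf).astype(np.uint8)   # s_k = (L - 1) * cdf
    return lut[img]                                       # remap every pixel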
| 309
| 1
|
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester (unittest.TestCase ):
def __init__( self , parent , batch_size=2 , seq_length=5_6 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=2 , intermediate_size=7 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=2 , num_random_blocks=3 , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_choices = num_choices
self.rescale_embeddings = rescale_embeddings
self.attention_type = attention_type
self.use_bias = use_bias
self.block_size = block_size
self.num_random_blocks = num_random_blocks
def __UpperCamelCase ( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
attention_mask = None
if self.use_attention_mask:
attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
config = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCamelCase ( self ):
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , token_type_ids , attention_mask = config_and_inputs
inputs_dict = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_flax
class a_ (_a , unittest.TestCase ):
__lowerCAmelCase : str = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
__lowerCAmelCase : Union[str, Any] = False
__lowerCAmelCase : Dict = False
def __UpperCamelCase ( self ):
self.model_tester = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCamelCase ( self ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCamelCase ( self ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCamelCase ( self ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCamelCase ( self ):
super().test_hidden_states_output()
@slow
def __UpperCamelCase ( self ):
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("""google/bigbird-roberta-base""" )
self.assertIsNotNone(model )
def __UpperCamelCase ( self ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCamelCase ( self ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
model = model_class(config )
@jax.jit
def model_jitted(input_ids , attention_mask=None , **kwargs ):
return model(input_ids=input_ids , attention_mask=attention_mask , **kwargs )
with self.subTest("""JIT Enabled""" ):
jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
self.assertEqual(len(outputs ) , len(jitted_outputs ) )
for jitted_output, output in zip(jitted_outputs , outputs ):
self.assertEqual(jitted_output.shape , output.shape )
def __UpperCamelCase ( self , fx_outputs , pt_outputs , model_class , tol=1E-5 , name="""outputs""" , attributes=None ):
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was done to return `attention_probs` (yet to be verified).
if name.startswith("""outputs.attentions""" ):
return
else:
super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes )
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_groupvit"""] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_tf_groupvit"""] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
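# With the lazy module in place, `import transformers.models.groupvit` stays
# cheap; heavy submodules load only on first attribute access (illustrative):
#
#     from transformers.models import groupvit
#     cfg = groupvit.GroupViTConfig()  # triggers the real configuration import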
| 309
| 1
|
'''simple docstring'''
import math
def is_prime( number : int ) -> bool:
assert isinstance(number , int ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def next_prime( value , factor=1 , **kwargs ) -> int:
value = factor * value
first_value_val = value
while not is_prime(value ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **kwargs )
return value
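# Example usage (doctest-style; values verified by hand):
#
#     >>> is_prime(13)
#     True
#     >>> next_prime(14)   # smallest prime >= 14 when counting upward
#     17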
| 309
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPosition = tuple[int, int]
class Node :
def __init__( self , pos_x , pos_y , goal_x , goal_y , g_cost , parent , ):
self.pos_x = pos_x
self.pos_y = pos_y
self.pos = (pos_y, pos_x)
self.goal_x = goal_x
self.goal_y = goal_y
self.g_cost = g_cost
self.parent = parent
self.h_cost = self.calculate_heuristic()
self.f_cost = self.g_cost + self.h_cost
def calculate_heuristic( self ):
dx = self.pos_x - self.goal_x
dy = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(dx ) + abs(dy )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , snake_case_ ):
return self.f_cost < other.f_cost
class AStar :
def __init__( self , start , goal ):
self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , None )
self.open_nodes = [self.start]
self.closed_nodes : list[Node] = []
self.reached = False
def search( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
current_node = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(current_node )
self.closed_nodes.append(current_node )
successors = self.get_successors(current_node )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(child_node )
else:
# retrieve the best current path
better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(child_node )
else:
self.open_nodes.append(better_node )
return [self.start.pos]
def get_successors( self , parent ):
successors = []
for action in delta:
pos_x = parent.pos_x + action[1]
pos_y = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
return successors
def retrace_path( self , node ):
current_node = node
path = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
current_node = current_node.parent
path.reverse()
return path
class BidirectionalAStar :
def __init__( self , start , goal ):
self.fwd_astar = AStar(start , goal )
self.bwd_astar = AStar(goal , start )
self.reached = False
def search( self ):
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
current_fwd_node = self.fwd_astar.open_nodes.pop(0 )
current_bwd_node = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
current_fwd_node , current_bwd_node )
self.fwd_astar.closed_nodes.append(current_fwd_node )
self.bwd_astar.closed_nodes.append(current_bwd_node )
self.fwd_astar.target = current_bwd_node
self.bwd_astar.target = current_fwd_node
successors = {
self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node ),
self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(child_node )
else:
# retrieve the best current path
better_node = astar.open_nodes.pop(
astar.open_nodes.index(child_node ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(child_node )
else:
astar.open_nodes.append(better_node )
return [self.fwd_astar.start.pos]
def retrace_bidirectional_path( self , fwd_node , bwd_node ):
fwd_path = self.fwd_astar.retrace_path(fwd_node )
bwd_path = self.bwd_astar.retrace_path(bwd_node )
bwd_path.pop()
bwd_path.reverse()
path = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
init = (0, 0)
goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
start_time = time.time()
a_star = AStar(init, goal)
path = a_star.search()
end_time = time.time() - start_time
print(F'AStar execution time = {end_time:f} seconds')
bd_start_time = time.time()
bidir_astar = BidirectionalAStar(init, goal)
_ = bidir_astar.search()
bd_end_time = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
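# The HEURISTIC flag above switches Node.calculate_heuristic between Manhattan
# distance (1) and Euclidean distance (0). Both are admissible on this
# 4-connected grid with unit step cost, so A* stays optimal either way;
# Manhattan is the tighter (and cheaper) estimate here. To compare, rerun with:
#
#     HEURISTIC = 1  # Manhattan: abs(dx) + abs(dy)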
| 309
| 1
|
'''simple docstring'''
UpperCamelCase_ = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCamelCase_ = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCamelCase_ = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 309
|
'''simple docstring'''
def find_minimum_change( denominations : list[int] , value : str ) -> list[int]:
total_value = int(value )
# Initialize Result
answer = []
# Traverse through all denominations, largest first
for denomination in reversed(denominations ):
# Find denominations
while int(total_value ) >= int(denomination ):
total_value -= int(denomination )
answer.append(denomination )  # Append to the "answer" array
return answer
# Driver Code
if __name__ == "__main__":
denominations = []
value = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
n = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'Denomination {i}: ').strip()))
value = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
denominations = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
value = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'Following is minimal change for {value}: ')
answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
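# Note: this greedy strategy is only guaranteed to be minimal for "canonical"
# coin systems such as the Indian denominations above. A quick counterexample
# with a non-canonical system (values chosen purely for illustration):
#
#     find_minimum_change([1, 3, 4], "6")   # greedy -> [4, 1, 1] (3 coins)
#     # optimal would be [3, 3] (2 coins)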
| 309
| 1
|
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
UpperCamelCase_ = """src/transformers"""
UpperCamelCase_ = """docs/source/en/tasks"""
def _UpperCAmelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[Any] ) -> Dict:
with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_lowerCAmelCase : List[str] = f.readlines()
# Find the start prompt.
_lowerCAmelCase : List[Any] = 0
while not lines[start_index].startswith(_lowerCamelCase ):
start_index += 1
start_index += 1
_lowerCAmelCase : Tuple = start_index
while not lines[end_index].startswith(_lowerCamelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def get_model_list_for_task( task_guide ):
model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
model_names = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_mapping_names or code in special_model_types)
}
return ", ".join([f'[{name}](../model_doc/{code})' for code, name in model_names.items()] ) + "\n"
def check_model_list_for_task( task_guide , overwrite=False ):
current_list , start_index , end_index , lines = _find_text_in_file(
filename=os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , start_prompt="""<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->""" , end_prompt="""<!--End of the generated tip-->""" , )
new_list = get_model_list_for_task(task_guide )
if current_list != new_list:
if overwrite:
with open(os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
""" to fix this.""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 1
|
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCamelCase_ = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
UpperCamelCase_ = direct_transformers_import(PATH_TO_TRANSFORMERS)
UpperCamelCase_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
UpperCamelCase_ = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def check_attribute_being_used ( config_class , attributes , default_value , source_strings ):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f'config.{attribute}' in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases, e.g. a call spread over several lines such as:
            #     getattr(
            #         self.config, "attribute"
            #     )
            elif (
                re.search(
                    Rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"' , modeling_source , )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        """bos_index""",
        """eos_index""",
        """pad_index""",
        """unk_index""",
        """mask_index""",
        """image_size""",
        """use_cache""",
        """out_features""",
        """out_indices""",
    ]
    attributes_used_in_generation = ["""encoder_no_repeat_ngram_size"""]
    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("""_token_id""" ):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
                case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
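# A minimal illustration of what the checker above detects (hypothetical source string and
# config class, not part of the original script):
#   src = 'class M:\n    def __init__(self, config):\n        self.h = config.hidden_size\n'
#   check_attribute_being_used(BertConfig, ["hidden_size"], 768, [src])  # -> True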
def check_config_attributes_being_used ( config_class ):
    signature = dict(inspect.signature(config_class.__init__ ).parameters )
    parameter_names = [x for x in list(signature.keys() ) if x not in ["""self""", """kwargs"""]]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map ) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class )
    model_dir = os.path.dirname(config_source_file )
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir , fn ) for fn in os.listdir(model_dir ) if fn.startswith("""modeling_""" )]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path ):
            with open(path ) as fp:
                modeling_sources.append(fp.read() )
    unused_attributes = []
    for config_param, default_value in zip(parameter_names , parameter_defaults ):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param] )
        if not check_attribute_being_used(config_class , attributes , default_value , modeling_sources ):
            unused_attributes.append(attributes[0] )
    return sorted(unused_attributes )
def check_config_attributes ( ):
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class ) ,
                lambda member : inspect.isclass(member )
                and issubclass(member , PretrainedConfig )
                and inspect.getmodule(member ) == inspect.getmodule(_config_class ) ,
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class )
            if len(unused_attributes ) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes ) > 0:
        error = """The following configuration classes contain unused attributes in the corresponding modeling files:\n"""
        for name, attributes in configs_with_unused_attributes.items():
            error += f'{name}: {attributes}\n'
        raise ValueError(error )
if __name__ == "__main__":
check_config_attributes()
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = """laion/clap-htsat-unfused"""
_lowerCAmelCase : int = tempfile.mkdtemp()
def __UpperCamelCase ( self , **snake_case_ ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self , **snake_case_ ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = self.get_feature_extractor()
_lowerCAmelCase : Union[str, Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase : int = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
_lowerCAmelCase : Dict = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_feature_extractor()
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Union[str, Any] = floats_list((3, 1_0_0_0) )
_lowerCAmelCase : List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
_lowerCAmelCase : Optional[Any] = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_feature_extractor()
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Tuple = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Union[str, Any] = """This is a test string"""
_lowerCAmelCase : Union[str, Any] = processor(text=snake_case_ )
_lowerCAmelCase : Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = self.get_feature_extractor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[Any] = processor.batch_decode(snake_case_ )
_lowerCAmelCase : Dict = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.get_feature_extractor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ (_a , unittest.TestCase ):
__lowerCAmelCase : Any = KandinskyVaaInpaintPipeline
__lowerCAmelCase : Any = ["""image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
__lowerCAmelCase : Tuple = [
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
__lowerCAmelCase : Dict = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__lowerCAmelCase : Optional[Any] = False
@property
def __UpperCamelCase ( self ):
return 3_2
@property
def __UpperCamelCase ( self ):
return 3_2
@property
def __UpperCamelCase ( self ):
return self.time_input_dim
@property
def __UpperCamelCase ( self ):
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self ):
return 1_0_0
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_lowerCAmelCase : int = UNetaDConditionModel(**snake_case_ )
return model
@property
def __UpperCamelCase ( self ):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Any = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = self.dummy_unet
_lowerCAmelCase : Optional[Any] = self.dummy_movq
_lowerCAmelCase : Optional[int] = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="""linear""" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=snake_case_ , )
_lowerCAmelCase : Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __UpperCamelCase ( self , snake_case_ , snake_case_=0 ):
_lowerCAmelCase : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
_lowerCAmelCase : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case_ )
# create init_image
_lowerCAmelCase : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
_lowerCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : Dict = Image.fromarray(np.uinta(snake_case_ ) ).convert("""RGB""" ).resize((2_5_6, 2_5_6) )
# create mask
_lowerCAmelCase : Union[str, Any] = np.ones((6_4, 6_4) , dtype=np.floataa )
_lowerCAmelCase : Dict = 0
if str(snake_case_ ).startswith("""mps""" ):
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(snake_case_ )
else:
_lowerCAmelCase : Union[str, Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
_lowerCAmelCase : Any = {
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = """cpu"""
_lowerCAmelCase : Optional[int] = self.get_dummy_components()
_lowerCAmelCase : Union[str, Any] = self.pipeline_class(**snake_case_ )
_lowerCAmelCase : Any = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
_lowerCAmelCase : Dict = pipe(**self.get_dummy_inputs(snake_case_ ) )
_lowerCAmelCase : List[str] = output.images
_lowerCAmelCase : List[Any] = pipe(
**self.get_dummy_inputs(snake_case_ ) , return_dict=snake_case_ , )[0]
_lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
_lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}' )
assert image.shape == (1, 6_4, 6_4, 3)
_lowerCAmelCase : Dict = np.array(
[0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def __UpperCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
_lowerCAmelCase : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_lowerCAmelCase : int = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : str = """a hat"""
_lowerCAmelCase : int = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(snake_case_ )
_lowerCAmelCase : Union[str, Any] = KandinskyVaaInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa )
_lowerCAmelCase : List[Any] = pipeline.to(snake_case_ )
pipeline.set_progress_bar_config(disable=snake_case_ )
_lowerCAmelCase : str = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase : int = pipe_prior(
snake_case_ , generator=snake_case_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_lowerCAmelCase : Optional[int] = pipeline(
image=snake_case_ , mask_image=snake_case_ , image_embeds=snake_case_ , negative_image_embeds=snake_case_ , generator=snake_case_ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type="""np""" , )
_lowerCAmelCase : List[Any] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(snake_case_ , snake_case_ )
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
UpperCamelCase_ = {
"""facebook/m2m100_418M""": 10_24,
}
# fmt: off
UpperCamelCase_ = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class a_ (_a ):
__lowerCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = ["""input_ids""", """attention_mask"""]
__lowerCAmelCase : List[int] = []
__lowerCAmelCase : List[int] = []
def __init__( self , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<pad>" , snake_case_="<unk>" , snake_case_="m2m100" , snake_case_ = None , snake_case_=8 , **snake_case_ , ):
_lowerCAmelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCAmelCase : Optional[Any] = language_codes
_lowerCAmelCase : Tuple = FAIRSEQ_LANGUAGE_CODES[language_codes]
_lowerCAmelCase : str = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
_lowerCAmelCase : int = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(snake_case_ )
for lang_code in fairseq_language_code
if self.get_lang_token(snake_case_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=snake_case_ , tgt_lang=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , language_codes=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Optional[int] = vocab_file
_lowerCAmelCase : Any = load_json(snake_case_ )
_lowerCAmelCase : str = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Union[str, Any] = spm_file
_lowerCAmelCase : Tuple = load_spm(snake_case_ , self.sp_model_kwargs )
_lowerCAmelCase : int = len(self.encoder )
_lowerCAmelCase : Union[str, Any] = {
self.get_lang_token(snake_case_ ): self.encoder_size + i for i, lang_code in enumerate(snake_case_ )
}
_lowerCAmelCase : List[str] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(snake_case_ )}
_lowerCAmelCase : Optional[Any] = {v: k for k, v in self.lang_token_to_id.items()}
_lowerCAmelCase : Any = src_lang if src_lang is not None else """en"""
_lowerCAmelCase : Optional[int] = tgt_lang
_lowerCAmelCase : Tuple = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_lowerCAmelCase : List[Any] = num_madeup_words
@property
def __UpperCamelCase ( self ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __UpperCamelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase ( self , snake_case_ ):
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(snake_case_ , self.encoder[self.unk_token] )
def __UpperCamelCase ( self , snake_case_ ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(snake_case_ , self.unk_token )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = []
_lowerCAmelCase : Optional[int] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case_ ) + token
_lowerCAmelCase : Optional[Any] = []
else:
current_sub_tokens.append(snake_case_ )
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
_lowerCAmelCase : List[Any] = [1] * len(self.prefix_tokens )
_lowerCAmelCase : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case_ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case_ )) + ([0] * len(snake_case_ )) + suffix_ones
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
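    # e.g. with src_lang="en" the layout built above is [__en__] + token_ids + [</s>], so the
    # special-tokens mask for a 3-token single sequence is [1, 0, 0, 0, 1].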
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_lowerCAmelCase : int = self.__dict__.copy()
_lowerCAmelCase : str = None
return state
def __setstate__( self , snake_case_ ):
_lowerCAmelCase : List[str] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : str = {}
_lowerCAmelCase : str = load_spm(self.spm_file , self.sp_model_kwargs )
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Dict = Path(snake_case_ )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , snake_case_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , snake_case_ )
elif not os.path.isfile(self.spm_file ):
with open(snake_case_ , """wb""" ) as fi:
_lowerCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (str(snake_case_ ), str(snake_case_ ))
def __UpperCamelCase ( self , snake_case_ , snake_case_ = "en" , snake_case_ = None , snake_case_ = "ro" , **snake_case_ , ):
_lowerCAmelCase : Union[str, Any] = src_lang
_lowerCAmelCase : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(snake_case_ , snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_lowerCAmelCase : Dict = src_lang
_lowerCAmelCase : str = self(snake_case_ , add_special_tokens=snake_case_ , **snake_case_ )
_lowerCAmelCase : Union[str, Any] = self.get_lang_id(snake_case_ )
_lowerCAmelCase : Tuple = tgt_lang_id
return inputs
def __UpperCamelCase ( self ):
self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase ( self ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Optional[Any] = self.get_lang_token(snake_case_ )
_lowerCAmelCase : List[Any] = self.lang_token_to_id[lang_token]
_lowerCAmelCase : Any = [self.cur_lang_id]
_lowerCAmelCase : Any = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = self.get_lang_token(snake_case_ )
_lowerCAmelCase : int = self.lang_token_to_id[lang_token]
_lowerCAmelCase : str = [self.cur_lang_id]
_lowerCAmelCase : str = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
return self.lang_code_to_token[lang]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[str] = self.get_lang_token(snake_case_ )
return self.lang_token_to_id[lang_token]
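# A minimal usage sketch for the tokenizer class above (upstream it is exposed as
# `M2M100Tokenizer`; the checkpoint name comes from the vocab map at the top of this file):
#   tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   ids = tok("Hello world!").input_ids  # begins with the __en__ language token, ends with </s>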
def load_spm ( path : str , sp_model_kwargs : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json ( path : str ) -> Union[Dict, List]:
    with open(path , """r""" ) as f:
        return json.load(f )
def save_json ( data , path : str ) -> None:
    with open(path , """w""" ) as f:
        json.dump(data , f , indent=2 )
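# e.g. a round-trip sanity check for the two JSON helpers above (hypothetical temp path):
#   save_json({"▁hello": 0}, "/tmp/vocab.json"); assert load_json("/tmp/vocab.json") == {"▁hello": 0}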
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
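# With the `_LazyModule` above, the names declared in `_import_structure` are only imported on
# first attribute access, keeping the initial `import transformers` cheap even when torch is installed.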
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _UpperCAmelCase ( ode_func : Callable , ya : float , xa : float , step_size : float , x_end : float ) -> np.ndarray:
    # explicit (forward) Euler: y_{k+1} = y_k + h * f(x_k, y_k)
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        x += step_size
    return y
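# A minimal usage sketch (hypothetical call, matching the signature above): forward Euler on
# dy/dx = y with y(0) = 1 over [0, 1]; with step_size = 0.001 the endpoint is ~2.7169, close to
# the exact value e ~ 2.71828.
#   y = _UpperCAmelCase(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
#   print(y[-1])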
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCamelCase_ = 16
UpperCamelCase_ = 32
def bamb ( x : int ) -> int:
    # convert a byte count to mebibytes (MiB)
    return int(x / 2**20 )
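# e.g. for a 512 MiB allocation: bamb(512 * 2**20) == 512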
class TorchTracemalloc :
    def __enter__( self ):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self
    def __exit__( self , *exc ):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin )
        self.peaked = bamb(self.peak - self.begin )
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
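# Typical use (as in the training loop below); `used` and `peaked` are MiB deltas:
#   with TorchTracemalloc() as tm:
#       ...  # run one epoch
#   print(tm.used, tm.peaked)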
def get_dataloaders ( accelerator : Accelerator , batch_size : int = 16 , model_name : str = """bert-base-cased""" , n_train : int = 3_20 , n_val : int = 1_60 ):
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset(
        """glue""" , """mrpc""" , split={"""train""": f'train[:{n_train}]', """validation""": f'validation[:{n_val}]'} )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" )
        return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function ( config , args ):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name , args.n_train , args.n_val )
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            """gradient_accumulation_steps"""
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch , num_epochs ):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader ):
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("""Memory before entering the train : {}""".format(bamb(tracemalloc.begin ) ) )
        accelerator.print("""Memory consumed at the end of the train (end-begin): {}""".format(tracemalloc.used ) )
        accelerator.print("""Peak Memory consumed during the train (max-begin): {}""".format(tracemalloc.peaked ) )
        accelerator.print(
            """Total Peak Memory consumed during the train (max): {}""".format(
                tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
        train_total_peak_memory[f'epoch-{epoch}'] = tracemalloc.peaked + bamb(tracemalloc.begin )
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f'epoch-{epoch}'] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , """peak_memory_utilization.json""" ) , """w""" ) as f:
            json.dump(train_total_peak_memory , f )
def main ( ):
    parser = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
    parser.add_argument(
        """--model_name_or_path""" , type=str , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=False , )
    parser.add_argument(
        """--output_dir""" , type=str , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
    parser.add_argument(
        """--peak_memory_upper_bound""" , type=float , default=None , help="""The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.""" , )
    parser.add_argument(
        """--n_train""" , type=int , default=3_20 , help="""Number of training examples to use.""" , )
    parser.add_argument(
        """--n_val""" , type=int , default=1_60 , help="""Number of validation examples to use.""" , )
    parser.add_argument(
        """--num_epochs""" , type=int , default=1 , help="""Number of train epochs.""" , )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def _UpperCAmelCase ( file , sock ) -> None:
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f )
    # ===== invoke =====
    send_file(filename="""mytext.txt""" , testing=True )
    # ===== assertions =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
UpperCamelCase_ = {
"""yjernite/retribert-base-uncased""": 5_12,
}
UpperCamelCase_ = {
"""yjernite/retribert-base-uncased""": {"""do_lower_case""": True},
}
class a_ (_a ):
__lowerCAmelCase : Optional[int] = VOCAB_FILES_NAMES
__lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : str = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : Any = RetriBertTokenizer
__lowerCAmelCase : List[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_=True , snake_case_=None , **snake_case_ , ):
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , snake_case_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , snake_case_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , snake_case_ ) != tokenize_chinese_chars
):
_lowerCAmelCase : Union[str, Any] = getattr(snake_case_ , normalizer_state.pop("""type""" ) )
_lowerCAmelCase : str = do_lower_case
_lowerCAmelCase : Dict = strip_accents
_lowerCAmelCase : Union[str, Any] = tokenize_chinese_chars
_lowerCAmelCase : Dict = normalizer_class(**snake_case_ )
_lowerCAmelCase : Optional[Any] = do_lower_case
def __UpperCamelCase ( self , snake_case_ , snake_case_=None ):
_lowerCAmelCase : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
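    # e.g. the method above yields [CLS] A [SEP] for a single sequence and [CLS] A [SEP] B [SEP]
    # for a pair.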
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : List[Any] = [self.sep_token_id]
_lowerCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : int = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester :
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=3_0 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1_0 , snake_case_=0.02 , snake_case_=None , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : Tuple = image_size
_lowerCAmelCase : int = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : str = is_training
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : str = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : Any = type_sequence_label_size
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : Optional[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : List[Any] = (image_size // patch_size) ** 2
_lowerCAmelCase : Dict = num_patches + 1
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[Any] = ViTMSNModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Tuple = self.type_sequence_label_size
_lowerCAmelCase : int = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[int] = model(snake_case_ , labels=snake_case_ )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase : int = 1
_lowerCAmelCase : List[str] = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[int] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = config_and_inputs
_lowerCAmelCase : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ (_a , _a , unittest.TestCase ):
__lowerCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__lowerCAmelCase : Optional[int] = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase : Dict = False
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : Any = False
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = ViTMSNModelTester(self )
_lowerCAmelCase : int = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(snake_case_ )
_lowerCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def __UpperCamelCase ( self ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[int] = ViTMSNModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def _UpperCAmelCase ( ) -> Tuple:
_lowerCAmelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a_ (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self ):
torch.manual_seed(2 )
_lowerCAmelCase : Dict = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(snake_case_ )
_lowerCAmelCase : Dict = self.default_image_processor
_lowerCAmelCase : Any = prepare_img()
_lowerCAmelCase : List[str] = image_processor(images=snake_case_ , return_tensors="""pt""" ).to(snake_case_ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Dict = model(**snake_case_ )
# verify the logits
_lowerCAmelCase : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case_ )
_lowerCAmelCase : Tuple = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) )
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-canny""" , from_pt=snake_case_ , dtype=jnp.bfloataa )
_lowerCAmelCase , _lowerCAmelCase : Any = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=snake_case_ , from_pt=snake_case_ , dtype=jnp.bfloataa )
_lowerCAmelCase : int = controlnet_params
_lowerCAmelCase : Dict = """bird"""
_lowerCAmelCase : str = jax.device_count()
_lowerCAmelCase : Union[str, Any] = pipe.prepare_text_inputs([prompts] * num_samples )
_lowerCAmelCase : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" )
_lowerCAmelCase : Dict = pipe.prepare_image_inputs([canny_image] * num_samples )
_lowerCAmelCase : List[str] = jax.random.PRNGKey(0 )
_lowerCAmelCase : Tuple = jax.random.split(snake_case_ , jax.device_count() )
_lowerCAmelCase : int = replicate(snake_case_ )
_lowerCAmelCase : Optional[Any] = shard(snake_case_ )
_lowerCAmelCase : List[Any] = shard(snake_case_ )
_lowerCAmelCase : int = pipe(
prompt_ids=snake_case_ , image=snake_case_ , params=snake_case_ , prng_seed=snake_case_ , num_inference_steps=5_0 , jit=snake_case_ , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
_lowerCAmelCase : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_lowerCAmelCase : Union[str, Any] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
_lowerCAmelCase : List[str] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_lowerCAmelCase : Optional[int] = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-openpose""" , from_pt=snake_case_ , dtype=jnp.bfloataa )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=snake_case_ , from_pt=snake_case_ , dtype=jnp.bfloataa )
_lowerCAmelCase : Union[str, Any] = controlnet_params
_lowerCAmelCase : str = """Chef in the kitchen"""
_lowerCAmelCase : List[str] = jax.device_count()
_lowerCAmelCase : Any = pipe.prepare_text_inputs([prompts] * num_samples )
_lowerCAmelCase : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" )
_lowerCAmelCase : Optional[Any] = pipe.prepare_image_inputs([pose_image] * num_samples )
_lowerCAmelCase : Dict = jax.random.PRNGKey(0 )
_lowerCAmelCase : Tuple = jax.random.split(snake_case_ , jax.device_count() )
_lowerCAmelCase : Tuple = replicate(snake_case_ )
_lowerCAmelCase : List[Any] = shard(snake_case_ )
_lowerCAmelCase : Optional[Any] = shard(snake_case_ )
_lowerCAmelCase : Tuple = pipe(
prompt_ids=snake_case_ , image=snake_case_ , params=snake_case_ , prng_seed=snake_case_ , num_inference_steps=5_0 , jit=snake_case_ , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
_lowerCAmelCase : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_lowerCAmelCase : Optional[int] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
_lowerCAmelCase : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_lowerCAmelCase : Optional[Any] = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class a_ (_a ):
__lowerCAmelCase : List[Any] = """microsoft/speecht5_tts"""
__lowerCAmelCase : List[Any] = (
"""This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
"""text to read (in English) and returns a waveform object containing the sound."""
)
__lowerCAmelCase : List[str] = """text_reader"""
__lowerCAmelCase : Optional[Any] = SpeechTaProcessor
__lowerCAmelCase : str = SpeechTaForTextToSpeech
__lowerCAmelCase : int = SpeechTaHifiGan
__lowerCAmelCase : int = ["""text"""]
__lowerCAmelCase : int = ["""audio"""]
def __UpperCamelCase ( self ):
if self.post_processor is None:
_lowerCAmelCase : int = """microsoft/speecht5_hifigan"""
super().setup()
def __UpperCamelCase ( self , snake_case_ , snake_case_=None ):
_lowerCAmelCase : Tuple = self.pre_processor(text=snake_case_ , return_tensors="""pt""" , truncation=snake_case_ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
_lowerCAmelCase : List[str] = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
_lowerCAmelCase : Any = torch.tensor(embeddings_dataset[7_3_0_5]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __UpperCamelCase ( self , snake_case_ ):
with torch.no_grad():
return self.model.generate_speech(**snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
with torch.no_grad():
return self.post_processor(snake_case_ ).cpu().detach()
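# A minimal usage sketch for the tool above (the instantiating name and the
# torchaudio save step are illustrative, not part of this file):
#
#   tool = TextReaderTool()                    # hypothetical alias for this class
#   waveform = tool("Hello world")             # 1-D float tensor, 16 kHz audio
#   torchaudio.save("out.wav", waveform.unsqueeze(0), 16000)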
'''simple docstring'''
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, """r""" ) as fh:
        fcntl.flock(fh, fcntl.LOCK_EX )
        try:
            print(*msgs )
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN )
local_rank = int(os.environ["""LOCAL_RANK"""])
torch.cuda.set_device(local_rank)
device = torch.device("""cuda""", local_rank)
hostname = socket.gethostname()
gpu = F'[{hostname}-{local_rank}]'
try:
# test distributed
dist.init_process_group("""nccl""")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F'{gpu} is OK (global rank: {rank}/{world_size})')
dist.barrier()
if rank == 0:
printflock(F'pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}')
except Exception:
printflock(F'{gpu} is broken')
raise
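# The all-reduce above is the heart of the check: summing a ones(1) tensor across
# ranks must give exactly world_size. The same test in isolation (assuming the
# process group is initialized and `device` is this rank's GPU):
#
#   t = torch.ones(1, device=device)
#   dist.all_reduce(t, op=dist.ReduceOp.SUM)
#   assert t.item() == dist.get_world_size()   # every rank contributed its 1.0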
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _UpperCAmelCase ( ) -> Tuple:
_lowerCAmelCase : List[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
_lowerCAmelCase : int = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert("""RGB""" )
return image
def _UpperCAmelCase ( _lowerCamelCase : Any ) -> Dict:
_lowerCAmelCase : str = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def _UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] ) -> Optional[Any]:
_lowerCAmelCase : str = dct.pop(_lowerCamelCase )
_lowerCAmelCase : str = val
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple ) -> Tuple:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_lowerCAmelCase : Tuple = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' )
_lowerCAmelCase : Optional[Any] = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
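        # the fused qkv projection expects a single bias vector; only the q and v
        # slices are trained in BLIP-2's ViT, so the k slice is zero-filled below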
_lowerCAmelCase : int = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase , requires_grad=_lowerCamelCase ), v_bias) )
_lowerCAmelCase : str = qkv_bias
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] ) -> List[Any]:
_lowerCAmelCase : str = 3_64 if """coco""" in model_name else 2_24
_lowerCAmelCase : str = BlipaVisionConfig(image_size=_lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
_lowerCAmelCase : int = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=_lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
_lowerCAmelCase : Union[str, Any] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=_lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
_lowerCAmelCase : Optional[int] = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_lowerCAmelCase : str = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
_lowerCAmelCase : Dict = BlipaConfig(vision_config=_lowerCamelCase , text_config=_lowerCamelCase )
return config, image_size
@torch.no_grad()
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : List[Any]=None , _lowerCamelCase : int=False ) -> List[str]:
_lowerCAmelCase : int = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
_lowerCAmelCase : List[Any] = tokenizer("""\n""" , add_special_tokens=_lowerCamelCase ).input_ids[0]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = BlipaForConditionalGeneration(_lowerCamelCase ).eval()
_lowerCAmelCase : Union[str, Any] = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
_lowerCAmelCase , _lowerCAmelCase : List[str] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_lowerCAmelCase : Dict = """cuda""" if torch.cuda.is_available() else """cpu"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = load_model_and_preprocess(
name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
_lowerCAmelCase : List[Any] = original_model.state_dict()
_lowerCAmelCase : Optional[int] = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_lowerCAmelCase : Tuple = state_dict.pop(_lowerCamelCase )
if key.startswith("""Qformer.bert""" ):
_lowerCAmelCase : List[Any] = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
_lowerCAmelCase : Optional[int] = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
_lowerCAmelCase : Dict = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
_lowerCAmelCase : Tuple = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
_lowerCAmelCase : List[Any] = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
_lowerCAmelCase : int = key.replace("""t5""" , """language""" )
_lowerCAmelCase : Tuple = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert len(_lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_lowerCAmelCase : Union[str, Any] = load_demo_image()
_lowerCAmelCase : Optional[int] = vis_processors["""eval"""](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
_lowerCAmelCase : List[str] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
# create processor
_lowerCAmelCase : Optional[int] = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
_lowerCAmelCase : Tuple = BlipaProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
_lowerCAmelCase : Any = processor(images=_lowerCamelCase , return_tensors="""pt""" ).pixel_values.to(_lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
_lowerCAmelCase : Optional[Any] = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
_lowerCAmelCase : Optional[Any] = hf_model(_lowerCamelCase , _lowerCamelCase ).logits
else:
_lowerCAmelCase : List[Any] = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
_lowerCAmelCase : Tuple = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
_lowerCAmelCase : Dict = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_lowerCAmelCase : Any = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_lowerCAmelCase : List[Any] = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase )
else:
# cast to same type
_lowerCAmelCase : Union[str, Any] = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_lowerCAmelCase : Optional[int] = """"""
_lowerCAmelCase : Union[str, Any] = tokenizer(_lowerCamelCase , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
_lowerCAmelCase : List[Any] = original_model.generate({"""image""": original_pixel_values} )
_lowerCAmelCase : Dict = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , _lowerCamelCase )
_lowerCAmelCase : int = input_ids.shape[1]
_lowerCAmelCase : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
_lowerCAmelCase : List[str] = [text.strip() for text in output_text]
print("""HF generation:""" , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'nielsr/{model_name}' )
hf_model.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
UpperCamelCase_ = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
UpperCamelCase_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
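# Typical invocation (the script filename is illustrative):
#   python convert_blip_2_original_to_pytorch.py --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b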
'''simple docstring'''
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    pattern = re.compile(
        R"""^(?:0|94|\+94|0{2}94)""" R"""7(0|1|2|4|5|6|7|8)""" R"""(-| |)""" R"""\d{7}$""" )
    return bool(re.search(pattern, phone) )
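# Hand-checked examples (the pattern accepts a 0/94/+94/0094 prefix, a mobile
# code 70-78 excluding 73, an optional "-" or " " separator, then seven digits):
#   is_sri_lankan_phone_number("0094702343221")  # True
#   is_sri_lankan_phone_number("+94773283048")   # True
#   is_sri_lankan_phone_number("0731292438")     # False, 73 is not a mobile code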
if __name__ == "__main__":
UpperCamelCase_ = """0094702343221"""
print(is_sri_lankan_phone_number(phone))
'''simple docstring'''
import argparse
import os
import re
UpperCamelCase_ = """src/diffusers"""
# Pattern that looks at the indentation in a line.
UpperCamelCase_ = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCamelCase_ = re.compile(r"""\[([^\]]+)\]""")
def _UpperCAmelCase ( _lowerCamelCase : List[Any] ) -> str:
_lowerCAmelCase : Dict = _re_indent.search(_lowerCamelCase )
return "" if search is None else search.groups()[0]
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str]="" , _lowerCamelCase : str=None , _lowerCamelCase : List[Any]=None ) -> str:
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Tuple = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(_lowerCamelCase ):
index += 1
_lowerCAmelCase : List[Any] = ["""\n""".join(lines[:index] )]
else:
_lowerCAmelCase : List[str] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase : Union[str, Any] = [lines[index]]
index += 1
while index < len(_lowerCamelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_lowerCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(_lowerCamelCase ) )
if index < len(_lowerCamelCase ) - 1:
_lowerCAmelCase : Union[str, Any] = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : Dict = []
else:
blocks.append("""\n""".join(_lowerCamelCase ) )
_lowerCAmelCase : Tuple = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_lowerCamelCase ) > 0:
blocks.append("""\n""".join(_lowerCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCamelCase ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] ) -> Any:
def _inner(_lowerCamelCase : Any ):
return key(_lowerCamelCase ).lower().replace("""_""" , """""" )
return _inner
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple=None ) -> Union[str, Any]:
# If no key is provided, we use a noop.
    def noop(_lowerCamelCase : List[Any] ):
        return _lowerCamelCase
if key is None:
_lowerCAmelCase : Union[str, Any] = noop
# Constants are all uppercase, they go first.
_lowerCAmelCase : Any = [obj for obj in objects if key(_lowerCamelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowerCAmelCase : Union[str, Any] = [obj for obj in objects if key(_lowerCamelCase )[0].isupper() and not key(_lowerCamelCase ).isupper()]
# Functions begin with a lowercase, they go last.
_lowerCAmelCase : Optional[Any] = [obj for obj in objects if not key(_lowerCamelCase )[0].isupper()]
_lowerCAmelCase : List[str] = ignore_underscore(_lowerCamelCase )
return sorted(_lowerCamelCase , key=_lowerCamelCase ) + sorted(_lowerCamelCase , key=_lowerCamelCase ) + sorted(_lowerCamelCase , key=_lowerCamelCase )
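# For instance, sort_objects(["CONST_B", "MyClass", "a_func", "CONST_A"]) returns
# ["CONST_A", "CONST_B", "MyClass", "a_func"]: constants, then classes, then
# functions, each group ordered by the case/underscore-insensitive key.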
def _UpperCAmelCase ( _lowerCamelCase : str ) -> str:
# This inner function sort imports between [ ].
def _replace(_lowerCamelCase : Union[str, Any] ):
_lowerCAmelCase : Optional[Any] = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
_lowerCAmelCase : List[str] = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : int = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(_lowerCamelCase )] ) + "]"
_lowerCAmelCase : Optional[int] = import_statement.split("""\n""" )
if len(_lowerCamelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowerCAmelCase : Dict = 2 if lines[1].strip() == """[""" else 1
_lowerCAmelCase : Tuple = [(i, _re_strip_line.search(_lowerCamelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_lowerCAmelCase : Tuple = sort_objects(_lowerCamelCase , key=lambda _lowerCamelCase : x[1] )
_lowerCAmelCase : Optional[Any] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_lowerCamelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowerCAmelCase : str = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowerCAmelCase : Tuple = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : Dict = keys[:-1]
_lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + """, """.join([f'"{k}"' for k in sort_objects(_lowerCamelCase )] )
return "\n".join(_lowerCamelCase )
else:
# Finally we have to deal with imports fitting on one line
_lowerCAmelCase : Dict = _re_bracket_content.sub(_replace , _lowerCamelCase )
return import_statement
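# Single-line example (last branch above):
#   sort_objects_in_import('_import_structure["models"] = ["Zeta", "Alpha"]')
# returns '_import_structure["models"] = ["Alpha", "Zeta"]'.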
def _UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]=True ) -> List[str]:
with open(_lowerCamelCase , """r""" ) as f:
_lowerCAmelCase : Optional[Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(
_lowerCamelCase , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_lowerCamelCase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowerCAmelCase : List[str] = main_blocks[block_idx]
_lowerCAmelCase : int = block.split("""\n""" )
# Get to the start of the imports.
_lowerCAmelCase : Any = 0
while line_idx < len(_lowerCamelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase )
else:
line_idx += 1
if line_idx >= len(_lowerCamelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowerCAmelCase : Any = """\n""".join(block_lines[line_idx:-1] )
_lowerCAmelCase : Tuple = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
_lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(_lowerCamelCase , indent_level=_lowerCamelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowerCAmelCase : List[Any] = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowerCAmelCase : Tuple = [(pattern.search(_lowerCamelCase ).groups()[0] if pattern.search(_lowerCamelCase ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowerCAmelCase : List[str] = [(i, key) for i, key in enumerate(_lowerCamelCase ) if key is not None]
_lowerCAmelCase : List[str] = [x[0] for x in sorted(_lowerCamelCase , key=lambda _lowerCamelCase : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : List[str] = []
for i in range(len(_lowerCamelCase ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
_lowerCAmelCase : Any = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(_lowerCamelCase )
count += 1
# And we put our main block back together with its first and last line.
_lowerCAmelCase : str = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(_lowerCamelCase ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(_lowerCamelCase , """w""" ) as f:
f.write("""\n""".join(_lowerCamelCase ) )
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any]=True ) -> Any:
_lowerCAmelCase : List[Any] = []
for root, _, files in os.walk(_lowerCamelCase ):
if "__init__.py" in files:
_lowerCAmelCase : List[Any] = sort_imports(os.path.join(_lowerCamelCase , """__init__.py""" ) , check_only=_lowerCamelCase )
if result:
_lowerCAmelCase : str = [os.path.join(_lowerCamelCase , """__init__.py""" )]
if len(_lowerCamelCase ) > 0:
raise ValueError(f'Would overwrite {len(_lowerCamelCase )} files, run `make style`.' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
UpperCamelCase_ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
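# Typical CLI use via fire (file names are placeholders):
#   python rouge_cli.py predictions.txt targets.txt --save_path metrics.json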
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
UpperCamelCase_ = logging.get_logger(__name__)
class a_ :
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[str] = question_encoder
_lowerCAmelCase : Optional[Any] = generator
_lowerCAmelCase : Optional[Any] = self.question_encoder
def __UpperCamelCase ( self , snake_case_ ):
if os.path.isfile(snake_case_ ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
_lowerCAmelCase : Any = os.path.join(snake_case_ , """question_encoder_tokenizer""" )
_lowerCAmelCase : Tuple = os.path.join(snake_case_ , """generator_tokenizer""" )
self.question_encoder.save_pretrained(snake_case_ )
self.generator.save_pretrained(snake_case_ )
@classmethod
def __UpperCamelCase ( cls , snake_case_ , **snake_case_ ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
_lowerCAmelCase : Dict = kwargs.pop("""config""" , snake_case_ )
if config is None:
_lowerCAmelCase : List[Any] = RagConfig.from_pretrained(snake_case_ )
_lowerCAmelCase : int = AutoTokenizer.from_pretrained(
snake_case_ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
_lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
snake_case_ , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=snake_case_ , generator=snake_case_ )
def __call__( self , *snake_case_ , **snake_case_ ):
return self.current_tokenizer(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
return self.generator.batch_decode(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
return self.generator.decode(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.question_encoder
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = self.generator
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = "longest" , snake_case_ = None , snake_case_ = True , **snake_case_ , ):
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , snake_case_ , )
if max_length is None:
_lowerCAmelCase : Any = self.current_tokenizer.model_max_length
_lowerCAmelCase : List[Any] = self(
snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , max_length=snake_case_ , padding=snake_case_ , truncation=snake_case_ , **snake_case_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
_lowerCAmelCase : List[str] = self.current_tokenizer.model_max_length
_lowerCAmelCase : List[str] = self(
text_target=snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , padding=snake_case_ , max_length=snake_case_ , truncation=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Dict = labels["""input_ids"""]
return model_inputs
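# A minimal usage sketch (this wrapper ships as `RagTokenizer`; the checkpoint is
# a real hub id, `generated_ids` is illustrative):
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   inputs = tokenizer("who wrote hamlet?", return_tensors="pt")
#   answers = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)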
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
UpperCamelCase_ = {
"""facebook/m2m100_418M""": 10_24,
}
# fmt: off
UpperCamelCase_ = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class a_ (_a ):
__lowerCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = ["""input_ids""", """attention_mask"""]
__lowerCAmelCase : List[int] = []
__lowerCAmelCase : List[int] = []
def __init__( self , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<pad>" , snake_case_="<unk>" , snake_case_="m2m100" , snake_case_ = None , snake_case_=8 , **snake_case_ , ):
_lowerCAmelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCAmelCase : Optional[Any] = language_codes
_lowerCAmelCase : Tuple = FAIRSEQ_LANGUAGE_CODES[language_codes]
_lowerCAmelCase : str = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
_lowerCAmelCase : int = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(snake_case_ )
for lang_code in fairseq_language_code
if self.get_lang_token(snake_case_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=snake_case_ , tgt_lang=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , language_codes=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Optional[int] = vocab_file
_lowerCAmelCase : Any = load_json(snake_case_ )
_lowerCAmelCase : str = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Union[str, Any] = spm_file
_lowerCAmelCase : Tuple = load_spm(snake_case_ , self.sp_model_kwargs )
_lowerCAmelCase : int = len(self.encoder )
_lowerCAmelCase : Union[str, Any] = {
self.get_lang_token(snake_case_ ): self.encoder_size + i for i, lang_code in enumerate(snake_case_ )
}
_lowerCAmelCase : List[str] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(snake_case_ )}
_lowerCAmelCase : Optional[Any] = {v: k for k, v in self.lang_token_to_id.items()}
_lowerCAmelCase : Any = src_lang if src_lang is not None else """en"""
_lowerCAmelCase : Optional[int] = tgt_lang
_lowerCAmelCase : Tuple = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_lowerCAmelCase : List[Any] = num_madeup_words
@property
def __UpperCamelCase ( self ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __UpperCamelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase ( self , snake_case_ ):
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(snake_case_ , self.encoder[self.unk_token] )
def __UpperCamelCase ( self , snake_case_ ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(snake_case_ , self.unk_token )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = []
_lowerCAmelCase : Optional[int] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case_ ) + token
_lowerCAmelCase : Optional[Any] = []
else:
current_sub_tokens.append(snake_case_ )
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
_lowerCAmelCase : List[Any] = [1] * len(self.prefix_tokens )
_lowerCAmelCase : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case_ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case_ )) + ([0] * len(snake_case_ )) + suffix_ones
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_lowerCAmelCase : int = self.__dict__.copy()
_lowerCAmelCase : str = None
return state
def __setstate__( self , snake_case_ ):
_lowerCAmelCase : List[str] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : str = {}
_lowerCAmelCase : str = load_spm(self.spm_file , self.sp_model_kwargs )
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Dict = Path(snake_case_ )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , snake_case_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , snake_case_ )
elif not os.path.isfile(self.spm_file ):
with open(snake_case_ , """wb""" ) as fi:
_lowerCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (str(snake_case_ ), str(snake_case_ ))
def __UpperCamelCase ( self , snake_case_ , snake_case_ = "en" , snake_case_ = None , snake_case_ = "ro" , **snake_case_ , ):
_lowerCAmelCase : Union[str, Any] = src_lang
_lowerCAmelCase : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(snake_case_ , snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_lowerCAmelCase : Dict = src_lang
_lowerCAmelCase : str = self(snake_case_ , add_special_tokens=snake_case_ , **snake_case_ )
_lowerCAmelCase : Union[str, Any] = self.get_lang_id(snake_case_ )
_lowerCAmelCase : Tuple = tgt_lang_id
return inputs
def __UpperCamelCase ( self ):
self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase ( self ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Optional[Any] = self.get_lang_token(snake_case_ )
_lowerCAmelCase : List[Any] = self.lang_token_to_id[lang_token]
_lowerCAmelCase : Any = [self.cur_lang_id]
_lowerCAmelCase : Any = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = self.get_lang_token(snake_case_ )
_lowerCAmelCase : int = self.lang_token_to_id[lang_token]
_lowerCAmelCase : str = [self.cur_lang_id]
_lowerCAmelCase : str = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
return self.lang_code_to_token[lang]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[str] = self.get_lang_token(snake_case_ )
return self.lang_token_to_id[lang_token]
def _UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
_lowerCAmelCase : Optional[Any] = sentencepiece.SentencePieceProcessor(**_lowerCamelCase )
spm.Load(str(_lowerCamelCase ) )
return spm
def _UpperCAmelCase ( _lowerCamelCase : str ) -> Union[Dict, List]:
with open(_lowerCamelCase , """r""" ) as f:
return json.load(_lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : str ) -> None:
with open(_lowerCamelCase , """w""" ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase , indent=2 )
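# Translation usage sketch (this tokenizer ships as `M2M100Tokenizer`; `model` is
# assumed to be a loaded M2M100ForConditionalGeneration):
#
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
#   encoded = tokenizer("Hello world", return_tensors="pt")
#   generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("fr"))
#   print(tokenizer.batch_decode(generated, skip_special_tokens=True))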
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a: list) -> None:
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("""Sorted order is:""" , """ """.join(str(n) for n in a) )
if __name__ == "__main__":
main()
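# Pigeonhole sort runs in O(n + range) time with O(range) extra space, so it only
# pays off when the spread of values is close to the number of elements.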
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class a_ (_a , unittest.TestCase ):
__lowerCAmelCase : List[Any] = MobileBertTokenizer
__lowerCAmelCase : List[str] = MobileBertTokenizerFast
__lowerCAmelCase : List[Any] = True
__lowerCAmelCase : Dict = True
__lowerCAmelCase : str = filter_non_english
__lowerCAmelCase : List[str] = """google/mobilebert-uncased"""
def __UpperCamelCase ( self ):
super().setUp()
_lowerCAmelCase : Optional[int] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
_lowerCAmelCase : Optional[Any] = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[str] = """UNwant\u00E9d,running"""
_lowerCAmelCase : Dict = """unwanted, running"""
return input_text, output_text
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = self.tokenizer_class(self.vocab_file )
_lowerCAmelCase : Optional[int] = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(snake_case_ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def __UpperCamelCase ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Tuple = self.get_rust_tokenizer()
_lowerCAmelCase : List[str] = """UNwant\u00E9d,running"""
_lowerCAmelCase : Dict = tokenizer.tokenize(snake_case_ )
_lowerCAmelCase : Dict = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
_lowerCAmelCase : int = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
_lowerCAmelCase : Any = tokenizer.encode(snake_case_ )
_lowerCAmelCase : Any = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# With lower casing
_lowerCAmelCase : List[Any] = self.get_tokenizer(do_lower_case=snake_case_ )
_lowerCAmelCase : List[str] = self.get_rust_tokenizer(do_lower_case=snake_case_ )
_lowerCAmelCase : List[Any] = """UNwant\u00E9d,running"""
_lowerCAmelCase : int = tokenizer.tokenize(snake_case_ )
_lowerCAmelCase : List[str] = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[Any] = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
_lowerCAmelCase : List[Any] = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
_lowerCAmelCase : str = self.get_rust_tokenizer()
_lowerCAmelCase : Dict = tokenizer.encode(snake_case_ )
_lowerCAmelCase : Dict = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = BasicTokenizer(do_lower_case=snake_case_ , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
_lowerCAmelCase : Dict = {}
for i, token in enumerate(snake_case_ ):
_lowerCAmelCase : Any = i
_lowerCAmelCase : int = WordpieceTokenizer(vocab=snake_case_ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def __UpperCamelCase ( self ):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def __UpperCamelCase ( self ):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def __UpperCamelCase ( self ):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Union[str, Any] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(snake_case_ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(snake_case_ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
_lowerCAmelCase : str = tokenizer.encode("""sequence builders""" , add_special_tokens=snake_case_ )
_lowerCAmelCase : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=snake_case_ )
_lowerCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(snake_case_ )
_lowerCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
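        # (101 and 102 are the [CLS] and [SEP] ids in the uncased BERT/MobileBERT vocab)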
def __UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase : str = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
_lowerCAmelCase : str = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
_lowerCAmelCase : Optional[Any] = tokenizer_r.encode_plus(
snake_case_ , return_attention_mask=snake_case_ , return_token_type_ids=snake_case_ , return_offsets_mapping=snake_case_ , add_special_tokens=snake_case_ , )
_lowerCAmelCase : List[str] = tokenizer_r.do_lower_case if hasattr(snake_case_ , """do_lower_case""" ) else False
_lowerCAmelCase : Optional[int] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """Allen"""),
((2_1, 2_3), """##NL"""),
((2_3, 2_4), """##P"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """allen"""),
((2_1, 2_3), """##nl"""),
((2_3, 2_4), """##p"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = ["""的""", """人""", """有"""]
_lowerCAmelCase : Union[str, Any] = """""".join(snake_case_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : str = self.tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
_lowerCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
_lowerCAmelCase : Dict = tokenizer_p.encode(snake_case_ , add_special_tokens=snake_case_ )
_lowerCAmelCase : Optional[int] = tokenizer_r.encode(snake_case_ , add_special_tokens=snake_case_ )
_lowerCAmelCase : Optional[Any] = tokenizer_r.convert_ids_to_tokens(snake_case_ )
_lowerCAmelCase : Any = tokenizer_p.convert_ids_to_tokens(snake_case_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(snake_case_ , snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
_lowerCAmelCase : str = False
_lowerCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
_lowerCAmelCase : Union[str, Any] = tokenizer_r.encode(snake_case_ , add_special_tokens=snake_case_ )
_lowerCAmelCase : List[str] = tokenizer_p.encode(snake_case_ , add_special_tokens=snake_case_ )
_lowerCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(snake_case_ )
_lowerCAmelCase : str = tokenizer_p.convert_ids_to_tokens(snake_case_ )
# it is expected that only the first Chinese character is not preceded by "##".
_lowerCAmelCase : Optional[Any] = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(snake_case_ )
]
self.assertListEqual(snake_case_ , snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
'''simple docstring'''
def solution(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }')
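# Worked example for n = 10: the square of the sum is 55**2 = 3025, the sum of
# the squares is 385, and the difference is 2640 (Project Euler problem 6's example).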
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
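# Quick accuracy check (illustrative): for y' = y, y(0) = 1 on [0, 5] with step
# 0.01, explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 5.0)[-1] is roughly 144.8
# versus the exact e**5 ~ 148.41, reflecting Euler's O(h) global error.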
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_focalnet"""] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
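# The _LazyModule above defers the torch-dependent imports: at import time only
# the import-structure mapping is built, and names like FocalNetModel are only
# resolved (and their modules imported) on first attribute access.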
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
abc1 = [0, 25, 50]
abc2 = [25, 50, 75]
young = fuzz.membership.trimf(X, abc1)
middle_aged = fuzz.membership.trimf(X, abc2)
# Compute the different operations using inbuilt functions.
one = np.ones(75)
zero = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
complement_a = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
alg_sum = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
alg_product = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded Difference = max[0, (µA(x) - µB(x))]
bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
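# Worked example: at x = 37.5 both triangular sets give µ_young(37.5) = 0.5 and
# µ_middle_aged(37.5) = 0.5, so the union is max(0.5, 0.5) = 0.5, the intersection
# is min(0.5, 0.5) = 0.5, and the algebraic sum is 0.5 + 0.5 - 0.25 = 0.75.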
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
UpperCamelCase_ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""DPTFeatureExtractor"""]
UpperCamelCase_ = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 1
|
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
UpperCamelCase_ = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def main() -> None:
g = Github(os.environ["""GITHUB_TOKEN"""] )
repo = g.get_repo("""huggingface/diffusers""" )
open_issues = repo.get_issues(state="""open""" )
for issue in open_issues:
comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
| 309
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    return np.maximum(0, vector)
if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0 0 5]
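# Sanity note: relu clips every negative entry to 0 and leaves non-negative
# entries unchanged, e.g. relu([-2.5, 0.0, 0.1]) -> [0.0, 0.0, 0.1].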
| 309
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class a_ (_a ):
def __init__( self , *snake_case_ , **snake_case_ ):
warnings.warn(
"""The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DeiTImageProcessor instead.""" , snake_case_ , )
super().__init__(*snake_case_ , **snake_case_ )
| 309
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class a_ (_a ):
def __init__( self , *snake_case_ , **snake_case_ ):
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" , snake_case_ , )
super().__init__(*snake_case_ , **snake_case_ )
| 309
| 1
|
'''simple docstring'''
def solution(n: int = 10_00) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
    print(F'{solution() = }')
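# Known result (worked check): for n = 1_000 the only Pythagorean triplet with
# a + b + c = 1_000 is (200, 375, 425), so solution() returns 31_875_000.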
| 309
|
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class a_ (_a ):
__lowerCAmelCase : Dict = (DPMSolverSDEScheduler,)
__lowerCAmelCase : Dict = 1_0
def __UpperCamelCase ( self , **snake_case_ ):
config = {
"""num_train_timesteps""": 1_1_0_0,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""noise_sampler_seed""": 0,
}
config.update(**snake_case_ )
return config
def __UpperCamelCase ( self ):
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=timesteps )
def __UpperCamelCase ( self ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
def __UpperCamelCase ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=schedule )
def __UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.scheduler_classes[0]
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : Any = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : Tuple = self.dummy_model()
_lowerCAmelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : Optional[Any] = sample.to(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Union[str, Any] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : Union[str, Any] = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = output.prev_sample
_lowerCAmelCase : List[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1E-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
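# Note: the reference sums/means are backend-specific (mps/cuda/cpu), presumably
# because the SDE noise sampling and floating-point kernels differ per device.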
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.scheduler_classes[0]
_lowerCAmelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
_lowerCAmelCase : Dict = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : int = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : int = sample.to(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : List[str] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : List[Any] = model(snake_case_ , snake_case_ )
_lowerCAmelCase : str = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : int = output.prev_sample
_lowerCAmelCase : str = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Optional[int] = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1E-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1E-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1E-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : str = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case_ )
_lowerCAmelCase : Tuple = self.dummy_model()
_lowerCAmelCase : Optional[int] = self.dummy_sample_deter.to(snake_case_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_lowerCAmelCase : str = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = output.prev_sample
_lowerCAmelCase : List[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1E-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = self.scheduler_classes[0]
_lowerCAmelCase : Optional[int] = self.get_scheduler_config()
_lowerCAmelCase : Tuple = scheduler_class(**snake_case_ , use_karras_sigmas=snake_case_ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case_ )
_lowerCAmelCase : List[Any] = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter.to(snake_case_ ) * scheduler.init_noise_sigma
_lowerCAmelCase : Optional[int] = sample.to(snake_case_ )
for t in scheduler.timesteps:
_lowerCAmelCase : List[str] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : int = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[int] = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : str = output.prev_sample
_lowerCAmelCase : Optional[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
| 309
| 1
|
'''simple docstring'''
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
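# Example usage (a quick sketch): for test_graph_1 the strongly connected
# components are {0, 1, 2} (the 0 -> 2 -> 1 -> 0 cycle), {3} and {4}:
# print(strongly_connected_components(test_graph_1))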
| 309
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"""vocab_file""": """vocab.txt"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
UpperCamelCase_ = {
"""YituTech/conv-bert-base""": 5_12,
"""YituTech/conv-bert-medium-small""": 5_12,
"""YituTech/conv-bert-small""": 5_12,
}
UpperCamelCase_ = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class a_ (_a ):
__lowerCAmelCase : Any = VOCAB_FILES_NAMES
__lowerCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Optional[int] = ConvBertTokenizer
def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
super().__init__(
vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
):
normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
normalizer_state["""lowercase"""] = do_lower_case
normalizer_state["""strip_accents"""] = strip_accents
normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
self.do_lower_case = do_lower_case
def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def save_vocabulary( self , save_directory , filename_prefix = None ):
files = self._tokenizer.model.save(save_directory , name=filename_prefix )
return tuple(files )
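# Worked example for create_token_type_ids_from_sequences (sketch): for two
# 3-token sequences A and B the mask is len([CLS] + A + [SEP]) * [0] followed by
# len(B + [SEP]) * [1], i.e. [0, 0, 0, 0, 0, 1, 1, 1, 1].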
| 309
| 1
|
'''simple docstring'''
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
    import doctest
    doctest.testmod()
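# Note (sketch): 2 / (1 + e**(-2x)) - 1 is algebraically the hyperbolic tangent,
# so outputs lie in (-1, 1); e.g. tangent_hyperbolic(np.array([0.0])) -> array([0.]).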
| 309
|
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch :
def __init__( self ):
_lowerCAmelCase : Any = """"""
_lowerCAmelCase : List[Any] = """"""
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : int = 0
_lowerCAmelCase : str = 2_5_6
_lowerCAmelCase : List[str] = 0
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Dict = 0
def stretch( self , input_image ):
self.img = cva.imread(input_image , 0 )
self.original_image = copy.deepcopy(self.img )
x , _ , _ = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="""x""" )
self.k = np.sum(x )
for i in range(len(x ) ):
prk = x[i] / self.k
self.sk += prk
last = (self.L - 1) * self.sk
if self.rem != 0:
self.rem = int(last % last )
last = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(last )
self.number_of_rows = int(np.ma.count(self.img ) / self.img[1].size )
self.number_of_cols = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
num = self.img[j][i]
if num != self.last_list[num]:
self.img[j][i] = self.last_list[num]
cva.imwrite("""output_data/output.jpg""" , self.img )
def plot_histogram( self ):
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def show_image( self ):
cva.imshow("""Output-Image""" , self.img )
cva.imshow("""Input-Image""" , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
file_path = os.path.join(os.path.dirname(__file__), """image_data/input.jpg""")
stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 309
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class a_ :
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=9_9 , snake_case_=[1, 1, 2] , snake_case_=1 , snake_case_=3_2 , snake_case_=4 , snake_case_=8 , snake_case_=3_7 , snake_case_="gelu_new" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=5_1_2 , snake_case_=3 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , snake_case_=False , ):
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : Union[str, Any] = seq_length
_lowerCAmelCase : List[str] = is_training
_lowerCAmelCase : Optional[int] = use_input_mask
_lowerCAmelCase : Optional[int] = use_token_type_ids
_lowerCAmelCase : Tuple = use_labels
_lowerCAmelCase : List[str] = vocab_size
_lowerCAmelCase : Dict = block_sizes
_lowerCAmelCase : Optional[int] = num_decoder_layers
_lowerCAmelCase : List[Any] = d_model
_lowerCAmelCase : Dict = n_head
_lowerCAmelCase : str = d_head
_lowerCAmelCase : List[Any] = d_inner
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Optional[int] = hidden_dropout
_lowerCAmelCase : Dict = attention_dropout
_lowerCAmelCase : Tuple = activation_dropout
_lowerCAmelCase : Dict = max_position_embeddings
_lowerCAmelCase : Any = type_vocab_size
_lowerCAmelCase : Tuple = 2
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : Any = num_choices
_lowerCAmelCase : Any = scope
_lowerCAmelCase : Any = initializer_std
# Used in the tests to check the size of the first attention layer
_lowerCAmelCase : Union[str, Any] = n_head
# Used in the tests to check the size of the first hidden state
_lowerCAmelCase : List[Any] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
_lowerCAmelCase : Optional[int] = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
_lowerCAmelCase : Optional[int] = self.num_hidden_layers + 2
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Dict = None
if self.use_input_mask:
_lowerCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : Optional[Any] = None
if self.use_token_type_ids:
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase : Any = None
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : List[Any] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : List[str] = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_lowerCAmelCase : List[str] = TFFunnelModel(config=snake_case_ )
_lowerCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase : Dict = model(snake_case_ )
_lowerCAmelCase : Optional[Any] = [input_ids, input_mask]
_lowerCAmelCase : Optional[int] = model(snake_case_ )
_lowerCAmelCase : int = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
_lowerCAmelCase : str = False
_lowerCAmelCase : str = TFFunnelModel(config=snake_case_ )
_lowerCAmelCase : int = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
_lowerCAmelCase : Union[str, Any] = False
_lowerCAmelCase : Optional[Any] = TFFunnelModel(config=snake_case_ )
_lowerCAmelCase : Dict = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_lowerCAmelCase : Union[str, Any] = TFFunnelBaseModel(config=snake_case_ )
_lowerCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase : Tuple = model(snake_case_ )
_lowerCAmelCase : int = [input_ids, input_mask]
_lowerCAmelCase : Optional[Any] = model(snake_case_ )
_lowerCAmelCase : int = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
_lowerCAmelCase : Any = False
_lowerCAmelCase : List[Any] = TFFunnelBaseModel(config=snake_case_ )
_lowerCAmelCase : Tuple = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
_lowerCAmelCase : List[Any] = False
_lowerCAmelCase : Optional[Any] = TFFunnelBaseModel(config=snake_case_ )
_lowerCAmelCase : List[str] = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_lowerCAmelCase : List[Any] = TFFunnelForPreTraining(config=snake_case_ )
_lowerCAmelCase : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase : Any = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_lowerCAmelCase : Any = TFFunnelForMaskedLM(config=snake_case_ )
_lowerCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase : Dict = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_lowerCAmelCase : Tuple = self.num_labels
_lowerCAmelCase : List[Any] = TFFunnelForSequenceClassification(config=snake_case_ )
_lowerCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase : List[Any] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_lowerCAmelCase : List[Any] = self.num_choices
_lowerCAmelCase : Any = TFFunnelForMultipleChoice(config=snake_case_ )
_lowerCAmelCase : Optional[int] = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase : str = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase : Optional[int] = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase : List[Any] = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_lowerCAmelCase : int = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_lowerCAmelCase : List[str] = self.num_labels
_lowerCAmelCase : Optional[int] = TFFunnelForTokenClassification(config=snake_case_ )
_lowerCAmelCase : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase : Union[str, Any] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_lowerCAmelCase : List[str] = TFFunnelForQuestionAnswering(config=snake_case_ )
_lowerCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase : str = model(snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class a_ (_a , _a , unittest.TestCase ):
__lowerCAmelCase : Tuple = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
__lowerCAmelCase : Union[str, Any] = (
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
__lowerCAmelCase : str = False
__lowerCAmelCase : Union[str, Any] = False
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = TFFunnelModelTester(self )
_lowerCAmelCase : Optional[int] = ConfigTester(self , config_class=snake_case_ )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
@require_tf
class a_ (_a , unittest.TestCase ):
__lowerCAmelCase : List[Any] = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
__lowerCAmelCase : Any = False
__lowerCAmelCase : Union[str, Any] = False
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = TFFunnelModelTester(self , base=snake_case_ )
_lowerCAmelCase : Optional[int] = ConfigTester(self , config_class=snake_case_ )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase_ = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 1
|
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint['''input_conv.weight_g''']
    hf_model.conv_pre.weight_v.data = checkpoint['''input_conv.weight_v''']
    hf_model.conv_pre.bias.data = checkpoint['''input_conv.bias''']
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[F"""upsamples.{i}.1.weight_g"""]
        hf_model.upsampler[i].weight_v.data = checkpoint[F"""upsamples.{i}.1.weight_v"""]
        hf_model.upsampler[i].bias.data = checkpoint[F"""upsamples.{i}.1.bias"""]
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_g"""]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_v"""]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F"""blocks.{i}.convs1.{j}.1.bias"""]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_g"""]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_v"""]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F"""blocks.{i}.convs2.{j}.1.bias"""]
    hf_model.conv_post.weight_g.data = checkpoint['''output_conv.1.weight_g''']
    hf_model.conv_post.weight_v.data = checkpoint['''output_conv.1.weight_v''']
    hf_model.conv_post.bias.data = checkpoint['''output_conv.1.bias''']
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()
    model = SpeechT5HifiGan(config)
    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint['''model''']['''generator'''], model, config)
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('''Pushing to the hub...''')
        model.push_to_hub(repo_id)
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
UpperCAmelCase__ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
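# Example invocation (hypothetical paths, flags as defined above):
# python convert_hifigan_checkpoint.py --checkpoint_path generator.ckpt \
#     --stats_path stats.npy --pytorch_dump_folder_path ./speecht5_hifigan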
| 0
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPosition = tuple[int, int]
class a_ :
def __init__( self , pos_x , pos_y , goal_x , goal_y , g_cost , parent , ):
self.pos_x = pos_x
self.pos_y = pos_y
self.pos = (pos_y, pos_x)
self.goal_x = goal_x
self.goal_y = goal_y
self.g_cost = g_cost
self.parent = parent
self.h_cost = self.calculate_heuristic()
self.f_cost = self.g_cost + self.h_cost
def calculate_heuristic( self ):
dx = self.pos_x - self.goal_x
dy = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(dx ) + abs(dy )
else:
return sqrt(dy**2 + dx**2 )
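# Worked example: for dx = 3, dy = 4 the Manhattan heuristic gives 3 + 4 = 7,
# while the Euclidean heuristic gives sqrt(9 + 16) = 5.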
def __lt__( self , snake_case_ ):
return self.f_cost < other.f_cost
class a_ :
def __init__( self , start , goal ):
self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , None )
self.open_nodes = [self.start]
self.closed_nodes: list[Node] = []
self.reached = False
def search( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
current_node = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(current_node )
self.closed_nodes.append(current_node )
successors = self.get_successors(current_node )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(child_node )
else:
# retrieve the best current path
better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(child_node )
else:
self.open_nodes.append(better_node )
return [self.start.pos]
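# Note: reaching this point means the open list was exhausted without finding
# the target, so only the start position is returned (no path exists).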
def get_successors( self , parent ):
successors = []
for action in delta:
pos_x = parent.pos_x + action[1]
pos_y = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
return successors
def retrace_path( self , node ):
current_node = node
path = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
current_node = current_node.parent
path.reverse()
return path
class a_ :
def __init__( self , start , goal ):
self.fwd_astar = AStar(start , goal )
self.bwd_astar = AStar(goal , start )
self.reached = False
def search( self ):
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
current_fwd_node = self.fwd_astar.open_nodes.pop(0 )
current_bwd_node = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
current_fwd_node , current_bwd_node )
self.fwd_astar.closed_nodes.append(current_fwd_node )
self.bwd_astar.closed_nodes.append(current_bwd_node )
self.fwd_astar.target = current_bwd_node
self.bwd_astar.target = current_fwd_node
successors = {
self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node ),
self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(child_node )
else:
# retrieve the best current path
better_node = astar.open_nodes.pop(
astar.open_nodes.index(child_node ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(child_node )
else:
astar.open_nodes.append(better_node )
return [self.fwd_astar.start.pos]
def retrace_bidirectional_path( self , fwd_node , bwd_node ):
fwd_path = self.fwd_astar.retrace_path(fwd_node )
bwd_path = self.bwd_astar.retrace_path(bwd_node )
bwd_path.pop()
bwd_path.reverse()
path = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
init = (0, 0)
goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
start_time = time.time()
a_star = AStar(init, goal)
path = a_star.search()
end_time = time.time() - start_time
print(F'AStar execution time = {end_time:f} seconds')
bd_start_time = time.time()
bidir_astar = BidirectionalAStar(init, goal)
bd_path = bidir_astar.search()  # run the search so the timing below measures it
bd_end_time = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
| 309
| 0
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
@slow
def test_small_integration_test(self):
    model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small" , return_dict=True ).to(torch_device )
    tokenizer = AutoTokenizer.from_pretrained("google/mt5-small" )
    input_ids = tokenizer("Hello there" , return_tensors="pt" ).input_ids
    labels = tokenizer("Hi I am" , return_tensors="pt" ).input_ids
    loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
    mtf_score = -(labels.shape[-1] * loss.item())
    EXPECTED_SCORE = -84.91_27
    self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
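# Note: mtf_score is the sequence log-likelihood (negative loss summed over the
# label tokens); the reference value was presumably computed with the original
# Mesh TensorFlow implementation, hence the "mtf" name.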
| 1
|
'''simple docstring'''
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Take this denomination as often as it still fits
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append to the "answers" array
    return answer
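# Worked example: find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
# returns [500, 100, 100, 100, 100, 50, 20, 10, 5, 2].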
# Driver Code
if __name__ == "__main__":
UpperCamelCase_ = []
UpperCamelCase_ = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
UpperCamelCase_ = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'Denomination {i}: ').strip()))
UpperCamelCase_ = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCamelCase_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
UpperCamelCase_ = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'Following is minimal change for {value}: ')
UpperCamelCase_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 309
| 0
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
lowercase__ = tempfile.mkdtemp()
lowercase__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowercase__ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
lowercase__ = os.path.join(self.tmpdirname , UpperCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Tuple , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase__ (self : Tuple , **UpperCamelCase : List[str] ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase__ (self : List[Any] , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase__ (self : str ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase__ = [Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase )
lowercase__ = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowercase__ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
lowercase__ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(UpperCamelCase , return_tensors='''np''' )
lowercase__ = processor(images=UpperCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
lowercase__ = '''lower newer'''
lowercase__ = processor(text=UpperCamelCase )
lowercase__ = tokenizer(UpperCamelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
lowercase__ = '''lower newer'''
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.batch_decode(UpperCamelCase )
lowercase__ = tokenizer.batch_decode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
lowercase__ = '''lower newer'''
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 2
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 0
|
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def lowerCAmelCase_ ( snake_case__="" ):
'''simple docstring'''
A : Optional[Any] = tempfile.mkdtemp()
return os.path.join(snake_case__ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)

        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        text = "Hey!"
        agent_type = AgentText(text)

        self.assertEqual(text, agent_type.to_string())
        self.assertEqual(text, agent_type.to_raw())
        self.assertEqual(text, agent_type)
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="bottleneck", hidden_act="relu", downsample_in_first_stage=False, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
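# A minimal usage sketch (the argument values below are illustrative; they match
# the defaults above rather than any particular checkpoint):
#
#   config = ResNetConfig(depths=[3, 4, 6, 3], layer_type="bottleneck")
#   # config.stage_names -> ["stem", "stage1", "stage2", "stage3", "stage4"]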
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
UpperCamelCase_ = {
"""facebook/m2m100_418M""": 10_24,
}
# fmt: off
UpperCamelCase_ = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
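# Each supported language is exposed to the model as a special token of the
# form "__en__"; as the constructor below shows, these tokens get ids appended
# after the regular sentencepiece vocabulary entries.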
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", pad_token="<pad>", unk_token="<unk>", language_codes="m2m100", sp_model_kwargs: Optional[Dict[str, Any]] = None, num_madeup_words=8, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwrite to save the Q-Former tokenizer in a separate folder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwrite to load the Q-Former tokenizer from a separate folder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """Approximate the solution of dy/dx = ode_func(x, y) on [x0, x_end] with Euler's method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
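# Example sketch: for dy/dx = y with y(0) = 1, a step of 0.01 over [0, 1] gives
#   explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1]  # ~2.70
# which approaches e as the step shrinks (Euler's method is first-order accurate).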
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = 'Hello, World!'
SAMPLE_LANGUAGE = 'en_XX'


def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    data_dir = Path('data_bin')
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent), checkpoint_file=Path(xmod_checkpoint_path).name, _name='xmod_base', arch='xmod_base', task='multilingual_masked_lm', data_name_or_path=str(data_dir), bpe='sentencepiece', sentencepiece_model=str(Path(xmod_checkpoint_path).parent / 'sentencepiece.bpe.model'), src_dict=str(data_dir / 'dict.txt'),
    )
    xmod.eval()  # disable dropout
    print(xmod)
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings, hidden_size=xmod.cfg.model.encoder_embed_dim, num_hidden_layers=xmod.cfg.model.encoder_layers, num_attention_heads=xmod.cfg.model.encoder_attention_heads, intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-5, pre_norm=xmod.cfg.model.encoder_normalize_before, adapter_reduction_factor=getattr(xmod.cfg.model, 'bottleneck', 2), adapter_layer_norm=xmod.cfg.model.adapter_layer_norm, adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm, ln_before_adapter=xmod.cfg.model.ln_before_adapter, languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
    print('Our X-MOD config:', config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError('Dimensions of self-attention weights do not match.')

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError('Dimensions of self-attention output weights do not match.')
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError('Dimensions of intermediate weights do not match.')
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError('Dimensions of feed-forward weights do not match.')
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError('Lists of language adapters do not match.')
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias

    # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads['mnli'].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads['mnli'].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads['mnli'].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads['mnli'].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads['mnli'](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(F"""max_absolute_diff = {max_absolute_diff}""")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print('Do both models output the same tensors?', '🔥' if success else '💩')
    if not success:
        raise Exception('Something went wRoNg')

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(F"""Saving model to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
A : Optional[int] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
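# Note: stacked @patch decorators inject mocks bottom-up, so the innermost
# patch ("builtins.open") arrives as the first test argument and
# "socket.socket" as the second.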
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: floats the largest remaining element to the end,
    then recurses on the unsorted prefix until a pass makes no swap."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
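# Example sketch: bubble_sort([0, 5, 2, 3, 2]) -> [0, 2, 2, 3, 5]. Worst case
# is O(n^2) comparisons; a pass with no swaps ends the recursion early.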
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import json
import sys
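# Renders a benchmark-results JSON file as a Markdown summary. The expected
# input shape (inferred from the code below) is:
#   {"<benchmark path>": {"<metric>": {"new": float, "old": float, "diff": float}}}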
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError('math domain error')

    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
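# For positive integers the integral reduces to a factorial:
# gamma(n) == math.factorial(n - 1), e.g. gamma(5.0) ≈ 24.0.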
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias')
        v_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias')

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f'visual_encoder.blocks.{i}.attn.qkv.bias'] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()
    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    lavis_model, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=lavis_model, model_type=model_type, is_eval=True, device=device
    )
original_model.eval()
print("""Done!""" )
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)
    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_lowerCAmelCase : Any = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_lowerCAmelCase : List[Any] = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase )
else:
# cast to same type
_lowerCAmelCase : Union[str, Any] = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_lowerCAmelCase : Optional[int] = """"""
_lowerCAmelCase : Union[str, Any] = tokenizer(_lowerCamelCase , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
_lowerCAmelCase : List[Any] = original_model.generate({"""image""": original_pixel_values} )
_lowerCAmelCase : Dict = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , _lowerCamelCase )
_lowerCAmelCase : int = input_ids.shape[1]
_lowerCAmelCase : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
_lowerCAmelCase : List[str] = [text.strip() for text in output_text]
print("""HF generation:""" , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
processor.push_to_hub(f'nielsr/{model_name}' )
hf_model.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
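# Usage sketch (illustrative, not part of the conversion script above): reloading a
# converted checkpoint for captioning through the standard Transformers API. The
# hub repo id below is an assumption, not something this script pushes to.
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, Blip2ForConditionalGeneration

blip2_processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
blip2_model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b")

demo_url = "http://images.cocodataset.org/val2017/000000039769.jpg"
demo_image = Image.open(requests.get(demo_url, stream=True).raw).convert("RGB")

blip2_inputs = blip2_processor(images=demo_image, return_tensors="pt")
with torch.no_grad():
    generated_ids = blip2_model.generate(**blip2_inputs, max_new_tokens=30)
print(blip2_processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip())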
| 309
| 0
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    """simple docstring"""
    num_nodes , num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list )
    for node_a, node_b, cost in edges:
        adjacency[node_a].append([node_b, cost] )
        adjacency[node_b].append([node_a, cost] )
    result = mst(adjacency )
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
        assert edge in result or reverse in result
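# A minimal sketch of lazy Prim's algorithm with a binary heap, assuming the same
# adjacency format as the test above ({node: [[neighbor, cost], ...]}); the real
# implementation under test lives in graphs.minimum_spanning_tree_prims.
import heapq


def prim_mst_sketch(adjacency: dict) -> set:
    start = next(iter(adjacency))
    visited = {start}
    # heap entries are (cost, from_node, to_node)
    heap = [(cost, start, to) for to, cost in adjacency[start]]
    heapq.heapify(heap)
    mst_edges = set()
    while heap and len(visited) < len(adjacency):
        cost, frm, to = heapq.heappop(heap)
        if to in visited:
            continue
        visited.add(to)
        mst_edges.add((frm, to))
        for nxt, nxt_cost in adjacency[to]:
            if nxt not in visited:
                heapq.heappush(heap, (nxt_cost, to, nxt))
    return mst_edges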
| 10
|
'''simple docstring'''
import argparse
import os
import re
UpperCamelCase_ = """src/diffusers"""
# Pattern that looks at the indentation in a line.
UpperCamelCase_ = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCamelCase_ = re.compile(r"""\[([^\]]+)\]""")
def get_indent( line : str ) -> str:
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str]="" , _lowerCamelCase : str=None , _lowerCamelCase : List[Any]=None ) -> str:
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Tuple = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(_lowerCamelCase ):
index += 1
_lowerCAmelCase : List[Any] = ["""\n""".join(lines[:index] )]
else:
_lowerCAmelCase : List[str] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase : Union[str, Any] = [lines[index]]
index += 1
while index < len(_lowerCamelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_lowerCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(_lowerCamelCase ) )
if index < len(_lowerCamelCase ) - 1:
_lowerCAmelCase : Union[str, Any] = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : Dict = []
else:
blocks.append("""\n""".join(_lowerCamelCase ) )
_lowerCAmelCase : Tuple = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_lowerCamelCase ) > 0:
blocks.append("""\n""".join(_lowerCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCamelCase ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def ignore_underscore( key ):
    def _inner(x ):
        return key(x ).lower().replace("""_""" , """""" )
    return _inner
def sort_objects( objects , key=None ):
    # If no key is provided, we use a noop.
    def noop(x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
def sort_objects_in_import( import_statement : str ) -> str:
    # This inner function sort imports between [ ].
    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return f'[{imports}]'
        keys = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys )] ) + "]"
    lines = import_statement.split("""\n""" )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == """[""" else 1
        keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys_to_sort , key=lambda x : x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + """, """.join([f'"{k}"' for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        return _re_bracket_content.sub(_replace , import_statement )
def sort_imports( file , check_only=True ):
    with open(file , """r""" ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("""\n""" )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = """\n""".join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(f'Overwriting {file}.' )
            with open(file , """w""" ) as f:
                f.write("""\n""".join(main_blocks ) )
def sort_imports_in_all_inits( check_only=True ):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , """__init__.py""" ) , check_only=check_only )
            if result:
                failures = [os.path.join(root , """__init__.py""" )]
    if len(failures ) > 0:
        raise ValueError(f'Would overwrite {len(failures )} files, run `make style`.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
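# Quick illustration (self-contained) of the ordering convention the sorter
# enforces inside `_import_structure` blocks: constants first, then classes,
# then functions, each group sorted case-insensitively with underscores ignored.
def _sort_key(name: str) -> str:
    return name.lower().replace("_", "")

names = ["load_model", "CONFIG_MAP", "AutoModel", "VERSION", "to_numpy"]
constants = sorted((n for n in names if n.isupper()), key=_sort_key)
classes = sorted((n for n in names if n[0].isupper() and not n.isupper()), key=_sort_key)
functions = sorted((n for n in names if not n[0].isupper()), key=_sort_key)
print(constants + classes + functions)
# ['CONFIG_MAP', 'VERSION', 'AutoModel', 'load_model', 'to_numpy']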
| 309
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "deit"
    def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , image_size=2_2_4 , patch_size=1_6 , num_channels=3 , qkv_bias=True , encoder_stride=1_6 , **kwargs , ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class DeiTOnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11")
@property
def _lowerCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def _lowerCamelCase ( self) -> float:
return 1e-4
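# Illustrative check of the config above: instantiating it with defaults and
# deriving the number of patch tokens a DeiT encoder sees at the default
# resolution (a plain consequence of image_size and patch_size).
config = DeiTConfig()
print(config.image_size, config.patch_size, config.num_channels)  # 224 16 3
print((config.image_size // config.patch_size) ** 2)  # 196 patch tokens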
| 11
|
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
UpperCamelCase_ = logging.get_logger(__name__)
class a_ :
    def __init__( self , question_encoder , generator ):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder
    def save_pretrained( self , save_directory ):
        if os.path.isfile(save_directory ):
            raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
        os.makedirs(save_directory , exist_ok=True )
        question_encoder_path = os.path.join(save_directory , """question_encoder_tokenizer""" )
        generator_path = os.path.join(save_directory , """generator_tokenizer""" )
        self.question_encoder.save_pretrained(question_encoder_path )
        self.generator.save_pretrained(generator_path )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer
        config = kwargs.pop("""config""" , None )
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path )
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.generator , subfolder="""generator_tokenizer""" )
        return cls(question_encoder=question_encoder , generator=generator )
    def __call__( self , *args , **kwargs ):
        return self.current_tokenizer(*args , **kwargs )
    def batch_decode( self , *args , **kwargs ):
        return self.generator.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.generator.decode(*args , **kwargs )
    def _switch_to_input_mode( self ):
        self.current_tokenizer = self.question_encoder
    def _switch_to_target_mode( self ):
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch( self , src_texts , tgt_texts = None , max_length = None , max_target_length = None , padding = "longest" , return_tensors = None , truncation = True , **kwargs , ):
        warnings.warn(
            """`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
            """regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
            """context manager to prepare your targets. See the documentation of your specific tokenizer for more """
            """details""" , FutureWarning , )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=truncation , **kwargs , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts , add_special_tokens=True , return_tensors=return_tensors , padding=padding , max_length=max_target_length , truncation=truncation , **kwargs , )
        model_inputs["""labels"""] = labels["""input_ids"""]
        return model_inputs
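# Usage sketch (illustrative): RagTokenizer is the upstream name of the class
# above; the hub repo id is an assumption. Encoding a question goes through the
# question-encoder tokenizer by default.
from transformers import RagTokenizer

rag_tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
question_inputs = rag_tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
print(question_inputs["input_ids"].shape)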
| 309
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , apply_ocr=True , ):
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict( self ):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp( self ):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
@property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , """do_resize""" ) )
        self.assertTrue(hasattr(image_processor , """size""" ) )
        self.assertTrue(hasattr(image_processor , """apply_ocr""" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCAmelCase__ ( self: Union[str, Any] ):
pass
def lowerCAmelCase__ ( self: Optional[int] ):
# Initialize image_processing
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
__lowerCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , UpperCamelCase_ )
self.assertIsInstance(encoding.boxes , UpperCamelCase_ )
# Test batched
__lowerCamelCase = image_processing(UpperCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase__ ( self: Tuple ):
# Initialize image_processing
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
__lowerCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__lowerCamelCase = image_processing(UpperCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase__ ( self: List[str] ):
# Initialize image_processing
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
__lowerCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__lowerCamelCase = image_processing(UpperCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase__ ( self: Union[str, Any] ):
# with apply_OCR = True
__lowerCamelCase = LayoutLMvaImageProcessor()
from datasets import load_dataset
__lowerCamelCase = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
__lowerCamelCase = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
__lowerCamelCase = image_processing(UpperCamelCase_ , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__lowerCamelCase = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
__lowerCamelCase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 
6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCamelCase_ )
self.assertListEqual(encoding.boxes , UpperCamelCase_ )
# with apply_OCR = False
__lowerCamelCase = LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase_ )
__lowerCamelCase = image_processing(UpperCamelCase_ , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 12
|
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort( _lowerCamelCase : list ) -> None:
    min_val = min(_lowerCamelCase )  # min() finds the minimum value
    max_val = max(_lowerCamelCase )  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in _lowerCamelCase:
        assert isinstance(x , int ), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size ):
        while holes[count] > 0:
            holes[count] -= 1
            _lowerCamelCase[i] = count + min_val
            i += 1
def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a )
    print("""Sorted order is:""" , """ """.join(map(str , a ) ) )
if __name__ == "__main__":
main()
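# Sanity check (illustrative): the sort mutates the list in place, so comparing
# against the built-in sorted() verifies correctness.
data = [8, 3, 2, 7, 4, 6, 8]
expected = sorted(data)
pigeonhole_sort(data)
assert data == expected, (data, expected)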
| 309
| 0
|
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess( _UpperCAmelCase ):
    w , h = _UpperCAmelCase.size
    w , h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
    image = _UpperCAmelCase.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
    image = np.array(image ).astype(np.float32 ) / 2_5_5.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class __lowercase ( DiffusionPipeline ):
    """simple docstring"""
    def __init__( self : Tuple , vqvae : VQModel , unet : UNetaDModel , scheduler : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler)
@torch.no_grad()
    def __call__( self , image : Union[torch.Tensor, PIL.Image.Image] = None , batch_size : Optional[int] = 1 , num_inference_steps : Optional[int] = 100 , eta : Optional[float] = 0.0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
        if isinstance(image , PIL.Image.Image):
            batch_size = 1
        elif isinstance(image , torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")
        if isinstance(image , PIL.Image.Image):
            image = preprocess(image)
        height , width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype)
        image = image.to(device=self.device , dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input , t)
            # predict the noise residual
            noise_pred = self.unet(latents_input , t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image , -1.0 , 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
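# Usage sketch (illustrative): running the super-resolution pipeline end to end.
# The hub repo id and input file name are assumptions, not taken from this file.
import torch
from diffusers import LDMSuperResolutionPipeline
from PIL import Image

pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

low_res = Image.open("low_res.png").convert("RGB").resize((128, 128))
upscaled = pipe(image=low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")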
| 13
|
'''simple docstring'''
def solution( n : int = 1_00 ) -> int:
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(F'{solution() = }')
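# Brute-force cross-check of the closed forms used above: the square of the sum
# is (n(n+1)/2)^2 and the sum of squares is n(n+1)(2n+1)/6.
def solution_brute_force(n: int = 1_00) -> int:
    return sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))

assert solution_brute_force(10) == 2640
assert solution_brute_force(1_00) == solution(1_00)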
| 309
| 0
|
from __future__ import annotations
arr: list[float] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect: list[float] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float] ) -> list[float]:
    """simple docstring"""
    result = []
    arr_size = len(arr )
    for i in range(arr_size ):
        next_element: float = -1
        for j in range(i + 1 , arr_size ):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element )
    return result
def next_greatest_element_fast(arr: list[float] ) -> list[float]:
    """simple docstring"""
    result = []
    for i, outer in enumerate(arr ):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element )
    return result
def next_greatest_element(arr: list[float] ) -> list[float]:
    """simple docstring"""
    arr_size = len(arr )
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size ) ):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index] )
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
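# Property check (illustrative): the three implementations must agree on random
# input; the stack-based version is O(n) amortized versus O(n^2) for the others.
import random

sample = [random.uniform(-50, 50) for _ in range(200)]
assert (
    next_greatest_element_slow(sample)
    == next_greatest_element_fast(sample)
    == next_greatest_element(sample)
)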
| 14
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_focalnet"""] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
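# Minimal sketch of the lazy-import pattern the module above relies on: attribute
# access triggers the real import. This is a simplified stand-in for the
# _LazyModule helper, not its actual implementation.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        # assumes self.__name__ is an importable package containing the submodules
        try:
            submodule = self._attr_to_module[attr]
        except KeyError:
            raise AttributeError(attr) from None
        module = importlib.import_module(f".{submodule}", self.__name__)
        return getattr(module, attr)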
| 309
| 0
|
__version__ = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 15
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
UpperCamelCase_ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""DPTFeatureExtractor"""]
UpperCamelCase_ = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_dpt"""] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class RoCBertConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "roc_bert"
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_cache=True , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , enable_pronunciation=True , enable_shape=True , pronunciation_embed_dim=768 , pronunciation_vocab_size=910 , shape_embed_dim=512 , shape_vocab_size=24_858 , concat_input=True , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id , **kwargs )
| 16
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu( vector : list[float] ) -> np.ndarray:
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
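# Companion sketch: the subgradient of ReLU used in backprop, with the common
# convention that the derivative at exactly 0 is taken to be 0.
def relu_derivative(vector: list[float]) -> np.ndarray:
    return (np.array(vector) > 0).astype(np.float64)

print(relu_derivative([-1, 0, 5]))  # --> [0. 0. 1.]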
| 309
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    """simple docstring"""
    def __init__( self , data : Any ):
        self.data = data
        self.next = None
class CircularLinkedList:
    """simple docstring"""
    def __init__( self ):
        self.head = None
        self.tail = None
    def __iter__( self ) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break
    def __len__( self ):
        return sum(1 for _ in self )
    def __repr__( self ):
        return "->".join(str(item ) for item in iter(self ) )
    def insert_tail( self , data : Any ):
        self.insert_nth(len(self ), data )
    def insert_head( self , data : Any ):
        self.insert_nth(0, data )
    def insert_nth( self , index : int , data : Any ):
        if index < 0 or index > len(self ):
            raise IndexError("list index out of range." )
        new_node = Node(data )
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self ) - 1:  # insert at tail
                self.tail = new_node
    def delete_front( self ):
        return self.delete_nth(0 )
    def delete_tail( self ):
        return self.delete_nth(len(self ) - 1 )
    def delete_nth( self , index : int = 0 ):
        if not 0 <= index < len(self ):
            raise IndexError("list index out of range." )
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self ) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data
    def is_empty( self ):
        return len(self ) == 0
def _A ( ) -> None:
    '''simple docstring'''
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3
    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
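# Small usage demo (illustrative): iteration wraps around the ring once per pass.
ring = CircularLinkedList()
for value in (1, 2, 3):
    ring.insert_tail(value)
print(repr(ring))           # 1->2->3
print(ring.delete_front())  # 1
print(len(ring))            # 2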
| 17
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class a_ (DonutImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            """The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DonutImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 309
| 0
|
from __future__ import annotations
END = '''#'''
class Trie:
    def __init__( self ):
        """simple docstring"""
        self._trie : dict = {}
    def insert_word( self , text : str ):
        """simple docstring"""
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True
    def find_word( self , prefix : str ):
        """simple docstring"""
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie )
    def _elements( self , d : dict ):
        """simple docstring"""
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v )]
            result.extend(sub_result )
        return tuple(result )
trie = Trie()
words = ('''depart''', '''detergent''', '''daring''', '''dog''', '''deer''', '''deal''')
for word in words:
    trie.insert_word(word)
def autocomplete_using_trie( string : str ):
    """simple docstring"""
    suffixes = trie.find_word(string )
    return tuple(string + word for word in suffixes )
def main():
    """simple docstring"""
    print(autocomplete_using_trie("de" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
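# Illustrative: completions keep the queried prefix, and complete words carry a
# trailing space coming from the END sentinel handling in _elements.
print(autocomplete_using_trie("dep"))  # ('depart ',)
print(autocomplete_using_trie("da"))   # ('daring ',)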
| 18
|
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 1_0

    def get_scheduler_config(self, **kwargs):
        config = {
            """num_train_timesteps""": 1_1_0_0,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """noise_sampler_seed""": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47_8210_4492_1875) < 1E-2
            assert abs(result_mean.item() - 0.2178_7059_6456_5277) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_3521_1181_6406) < 1E-2
            assert abs(result_mean.item() - 0.2_2342_9068_9229_9652) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1E-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1E-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77_1492_0043_9453) < 1E-2
            assert abs(result_mean.item() - 0.1_6226_2890_1481_6284) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1_6633_6059_5703) < 1E-2
            assert abs(result_mean.item() - 0.1_6688_3260_0116_7297) < 1E-3
        else:
            assert abs(result_sum.item() - 119.8_4875_4882_8125) < 1E-2
            assert abs(result_mean.item() - 0.1560_5306_6253_6621) < 1E-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46_9573_9746_0938) < 1E-2
            assert abs(result_mean.item() - 0.2_1805_9346_0798_2635) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_3536_3769_5312) < 1E-2
            assert abs(result_mean.item() - 0.2_2342_9083_8241_5771) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1E-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1E-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66_9741_3574_2188) < 1E-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1E-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63_6535_6445_3125) < 1E-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1E-2
        else:
            assert abs(result_sum.item() - 170.3_1352_2338_8672) < 1E-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1E-2
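
# Note (added): the mps/cuda/cpu branches exist because kernel implementations
# differ per backend, so the expected sums and means are backend-specific
# snapshot values; the 1E-2 / 1E-3 tolerances absorb small float drift.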
| 309
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_wavlm'''] = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
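
# How the lazy pattern behaves (added note): importing this package only builds
# the stub; _LazyModule resolves names such as WavLMModel from modeling_wavlm on
# first attribute access, so torch is not imported until it is actually needed.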
| 19
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""YituTech/conv-bert-base""": 5_12,
"""YituTech/conv-bert-medium-small""": 5_12,
"""YituTech/conv-bert-small""": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        # Rebuild the backend normalizer if its stored state disagrees with the
        # arguments passed in.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("""lowercase""", do_lower_case) != do_lower_case
            or normalizer_state.get("""strip_accents""", strip_accents) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("""type"""))
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
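
# Minimal usage sketch (added; checkpoint name taken from the maps above):
#   tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   tokenizer("hello world")["input_ids"]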
| 309
| 0
|
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""")
    # Ideal gas law: P = nRT / V
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""")
    # Ideal gas law rearranged: V = nRT / P
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
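
# Worked example (added): 2 mol at 100 K in a 5 m^3 vessel gives
# P = nRT / V = 2 * 100 * 8.314462 / 5 = 332.57848 Pa.
#   pressure_of_gas_system(2, 100, 5)  # -> 332.57848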
if __name__ == "__main__":
from doctest import testmod
testmod()
| 20
|
'''simple docstring'''
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 2_5_6
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 2_5_6, [0, 2_5_6], label="""x""")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("""output_data/output.jpg""", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 2_5_6, [0, 2_5_6])

    def show_image(self):
        cv2.imshow("""Output-Image""", self.img)
        cv2.imshow("""Input-Image""", self.original_image)
        cv2.waitKey(5_0_0_0)
        cv2.destroyAllWindows()
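
# What stretch() implements (added note): the classic histogram-equalization
# mapping s_k = (L - 1) * sum_{j <= k} p(j), where p(j) is the normalized
# histogram bin; self.sk accumulates the CDF and self.last_list becomes the
# rounded lookup table applied to every pixel.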
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), """image_data/input.jpg""")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 309
| 0
|