def kth_permutation(k, n):
    """Find the k-th (0-indexed) lexicographic permutation of 0, 1, ..., n - 1."""
    # Factorials from 1! to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
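# Usage sketch (an addition, not part of the original file): with n = 4 there are
# 4! = 24 permutations, and k is a 0-indexed rank in lexicographic order.
# Assuming the function above is in scope:
#
#     >>> kth_permutation(5, 4)
#     [0, 3, 2, 1]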
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
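# Usage sketch (an illustration, assuming the package layout of `transformers`):
# because `sys.modules[__name__]` is swapped for a `_LazyModule`, a statement like
#
#     from transformers.models.groupvit import GroupViTConfig
#
# resolves `GroupViTConfig` through `_import_structure` on first attribute access,
# so the heavy torch/TF submodules are only imported when actually needed.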
"""DETA model configuration."""
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
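# Usage sketch (an addition; assumes a transformers build that ships DETA):
#
#     >>> config = DetaConfig(num_queries=300, two_stage=False, with_box_refine=False)
#     >>> config.num_attention_heads  # routed to `encoder_attention_heads` via attribute_map
#     8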
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # retarget each direction's heuristic at the other frontier's node
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple

import torch
from audiocraft.models import MusicGen

from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name: str) -> str:
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
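# Example invocation (a sketch; the script filename and output path are placeholders):
#
#     python convert_musicgen_transformers.py \
#         --checkpoint small --pytorch_dump_folder ./musicgen-small --device cpu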
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
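# Worked example (an addition): for the default Indian denominations and a value
# of 93 the greedy choice sequence is 50, 20, 20, 2, 1:
#
#     >>> find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "93")
#     [50, 20, 20, 2, 1]
#
# Note that greedy change-making is only optimal for canonical coin systems; for
# denominations like [1, 3, 4] and value 6 it returns [4, 1, 1] instead of [3, 3].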
from manim import *


class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        cpu_targs = []

        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)

            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr, *cpu_targs)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        ckpt_arr = []
        ckpt_cpu_arr = []

        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])

        self.play(Write(step_1, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)

        self.play(FadeOut(step_1))

        step_2 = MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection.", font_size=24)
        step_2.move_to([2, 2, 0])

        self.play(Write(step_2, run_time=3))

        self.play(
            FadeOut(checkpoint_rect, checkpoint_text, *ckpt_arr, *ckpt_cpu_arr),
        )

        self.wait()
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCAmelCase__ : List[Any] = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase__ : Any = direct_transformers_import(PATH_TO_TRANSFORMERS)
UpperCAmelCase__ : List[str] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
UpperCAmelCase__ : Optional[int] = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : str = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'''config.{attribute}''' in modeling_source
or f'''getattr(config, "{attribute}"''' in modeling_source
or f'''getattr(self.config, "{attribute}"''' in modeling_source
):
SCREAMING_SNAKE_CASE__ : Optional[int] = True
# Deal with multi-line cases
elif (
re.search(
Rf'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' ,_snake_case ,)
is not None
):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
SCREAMING_SNAKE_CASE__ : str = [
"""bos_index""",
"""eos_index""",
"""pad_index""",
"""unk_index""",
"""mask_index""",
"""image_size""",
"""use_cache""",
"""out_features""",
"""out_indices""",
]
SCREAMING_SNAKE_CASE__ : int = ["""encoder_no_repeat_ngram_size"""]
# Special cases to be allowed
SCREAMING_SNAKE_CASE__ : List[str] = True
if not attribute_used:
SCREAMING_SNAKE_CASE__ : List[str] = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
SCREAMING_SNAKE_CASE__ : str = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
SCREAMING_SNAKE_CASE__ : Any = True
elif attribute.endswith("""_token_id""" ):
SCREAMING_SNAKE_CASE__ : Dict = True
# configuration class specific cases
if not case_allowed:
SCREAMING_SNAKE_CASE__ : List[str] = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ ,[] )
SCREAMING_SNAKE_CASE__ : str = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = dict(inspect.signature(config_class.__init__ ).parameters )
SCREAMING_SNAKE_CASE__ : str = [x for x in list(signature.keys() ) if x not in ["""self""", """kwargs"""]]
SCREAMING_SNAKE_CASE__ : List[Any] = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
SCREAMING_SNAKE_CASE__ : Tuple = {}
if len(config_class.attribute_map ) > 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
SCREAMING_SNAKE_CASE__ : Any = inspect.getsourcefile(_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.dirname(_snake_case )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [os.path.join(_snake_case ,_snake_case ) for fn in os.listdir(_snake_case ) if fn.startswith("""modeling_""" )]
# Get the source code strings
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for path in modeling_paths:
if os.path.isfile(_snake_case ):
with open(_snake_case ) as fp:
modeling_sources.append(fp.read() )
SCREAMING_SNAKE_CASE__ : Any = []
for config_param, default_value in zip(_snake_case ,_snake_case ):
# `attributes` here is all the variant names for `config_param`
SCREAMING_SNAKE_CASE__ : Dict = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(_snake_case ,_snake_case ,_snake_case ,_snake_case ):
unused_attributes.append(attributes[0] )
return sorted(_snake_case )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : str = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
SCREAMING_SNAKE_CASE__ : List[str] = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) ,lambda _snake_case : inspect.isclass(_snake_case )
and issubclass(_snake_case ,_snake_case )
and inspect.getmodule(_snake_case ) == inspect.getmodule(_config_class ) ,)
]
for config_class in config_classes_in_module:
SCREAMING_SNAKE_CASE__ : Optional[int] = check_config_attributes_being_used(_snake_case )
if len(_snake_case ) > 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = unused_attributes
if len(_snake_case ) > 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """The following configuration classes contain unused attributes in the corresponding modeling files:\n"""
for name, attributes in configs_with_unused_attributes.items():
error += f'''{name}: {attributes}\n'''
raise ValueError(_snake_case )
if __name__ == "__main__":
check_config_attributes()
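# Typical usage (a sketch): run from the repository root so that `src/transformers`
# resolves, e.g.
#
#     python utils/check_config_attributes.py
#
# A non-empty result raises a ValueError listing every config class whose
# constructor arguments never appear in the corresponding modeling files.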
import shutil
import tempfile
import unittest

from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio

from .test_feature_extraction_clap import floats_list


@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
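# Worked example (an addition): an initial outlay of 100 followed by inflows of
# 60 for two periods at a 10% discount rate:
#
#     >>> present_value(0.10, [-100, 60, 60])
#     4.13
#
# i.e. -100 + 60/1.1 + 60/1.1**2 = 4.1322..., rounded to two digits.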
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
    },
    "spm_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_config_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
    "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
    "wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
# fmt: on


class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or []
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
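# Usage sketch (an addition, following the documented M2M100 API):
#
#     >>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#     >>> model_inputs = tokenizer("Hello world!", return_tensors="pt")
#
# The source-language token is prepended and `</s>` appended, per
# `set_src_lang_special_tokens` above.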
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ):
__a : Optional[Any] = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = StableDiffusionLatentUpscalePipeline
A_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
A_ = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
A_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ = frozenset([] )
A_ = True
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = 1
__a : Any = 4
__a : List[str] = (16, 16)
__a : List[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a )
return image
def __UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : List[Any] = UNetaDConditionModel(
act_fn='gelu' , attention_head_dim=8 , norm_num_groups=__a , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'KDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
) , in_channels=8 , mid_block_type=__a , only_cross_attention=__a , out_channels=5 , resnet_time_scale_shift='scale_shift' , time_embedding_type='fourier' , timestep_post_act='gelu' , up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') , )
__a : Dict = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
__a : str = EulerDiscreteScheduler(prediction_type='sample' )
__a : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='quick_gelu' , projection_dim=512 , )
__a : Optional[Any] = CLIPTextModel(__a )
__a : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__a : Any = {
'unet': model.eval(),
'vae': vae.eval(),
'scheduler': scheduler,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def __UpperCAmelCase ( self , __a , __a=0 ):
'''simple docstring'''
if str(__a ).startswith('mps' ):
__a : str = torch.manual_seed(__a )
else:
__a : Tuple = torch.Generator(device=__a ).manual_seed(__a )
__a : Optional[int] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': self.dummy_image.cpu(),
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = 'cpu'
__a : List[Any] = self.get_dummy_components()
__a : Optional[int] = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__a : Dict = self.get_dummy_inputs(__a )
__a : Tuple = pipe(**__a ).images
__a : List[Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 256, 256, 3) )
__a : List[str] = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
__a : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a , 1E-3 )
    def test_attention_slicing_forward_pass ( self ):
        '''simple docstring'''
        super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
    def test_cpu_offload_forward_pass ( self ):
        '''simple docstring'''
        super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
    def test_dict_tuple_outputs_equivalent ( self ):
        '''simple docstring'''
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
    def test_inference_batch_single_identical ( self ):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
    def test_pt_np_pil_outputs_equivalent ( self ):
        '''simple docstring'''
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
    def test_save_load_local ( self ):
        '''simple docstring'''
        super().test_save_load_local(expected_max_difference=3E-3 )
    def test_save_load_optional_components ( self ):
        '''simple docstring'''
        super().test_save_load_optional_components(expected_max_difference=3E-3 )
    def test_karras_schedulers_shape ( self ):
        '''simple docstring'''
        skip_schedulers : List[Any] = [
            'DDIMScheduler',
            'DDPMScheduler',
            'PNDMScheduler',
            'HeunDiscreteScheduler',
            'EulerAncestralDiscreteScheduler',
            'KDPM2DiscreteScheduler',
            'KDPM2AncestralDiscreteScheduler',
            'DPMSolverSDEScheduler',
        ]
        components : Tuple = self.get_dummy_components()
        pipe : Tuple = self.pipeline_class(**components )
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True )
        # `diffusers` and `torch_device` are assumed to be imported at module level.
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs : List[str] = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = 2
        outputs : Tuple = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # this scheduler is not supported by the pipeline; skip it
                continue
            scheduler_cls : Tuple = getattr(diffusers , scheduler_enum.name )
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config )
            output : int = pipe(**inputs )[0]
            outputs.append(output )
        assert check_same_shape(outputs )
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
    def tearDown ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        generator : Dict = torch.manual_seed(33 )
        pipe : str = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' , torch_dtype=torch.float16 )
        pipe.to('cuda' )
        upscaler : str = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.float16 )
        upscaler.to('cuda' )
        prompt : Union[str, Any] = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
        latents : int = pipe(prompt , generator=generator , output_type='latent' ).images
        image : Union[str, Any] = upscaler(
            prompt=prompt , image=latents , num_inference_steps=20 , guidance_scale=0 , generator=generator , output_type='np' , ).images[0]
        expected_image : Optional[Any] = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy' )
        assert np.abs((expected_image - image).mean() ) < 5E-2
    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        generator : Optional[Any] = torch.manual_seed(33 )
        upscaler : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.float16 )
        upscaler.to('cuda' )
        prompt : Optional[int] = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
        image : List[str] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png' )
        upscaled_image : List[str] = upscaler(
            prompt=prompt , image=image , num_inference_steps=20 , guidance_scale=0 , generator=generator , output_type='np' , ).images[0]
        expected_image : Tuple = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy' )
        assert np.abs((expected_image - upscaled_image).max() ) < 5E-2
| 27
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _UpperCAmelCase ( ode_func : Callable , ya : float , xa : float , step_size : float , x_end : float ) -> np.ndarray:
    n : Union[str, Any] = int(np.ceil((x_end - xa) / step_size ) )
    y : Tuple = np.zeros((n + 1,) )
    y[0] = ya
    x : int = xa
    for k in range(n ):
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
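# Added usage sketch (not from the source): integrating dy/dx = y from x = 0 to
# x = 1 with y(0) = 1 using the forward-Euler routine above; the exact solution
# is e^x, so the final value should be close to e for a small enough step size.
if __name__ == "__main__":
    _y = _UpperCAmelCase(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(_y[-1])  # ~2.7048 with step_size=0.01 (forward Euler undershoots e)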
| 309
| 0
|
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_lowerCamelCase : Dict = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    def analyze_directory ( self : Any , directory : Path , identifier : Union[str, None] = None , n_identifier : Union[List[str], None] = None , ignore_files : Union[str, List[str], None] = None , only_modules : bool = True , ):
        """simple docstring"""
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier , list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append('__init__.py' )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing' , file )
            if only_modules:
                module_identifier = file.split('.' )[0]
                try:
                    module = getattr(transformers , module_identifier )
                    suite = doctest.DocTestSuite(module )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(f"""{module_identifier} is not a module.""" )
            else:
                result = doctest.testfile(str('..' / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
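    # Added note: doctest.DocTestSuite collects the >>> examples from a module's
    # docstrings into a unittest suite, while doctest.testfile runs a standalone
    # file's doctests directly; both branches above assert zero failures.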
    def A ( self : Optional[int] ):
        """simple docstring"""
        directory = Path('src/transformers' )
        identifier = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )
    def A ( self : Union[str, Any] ):
        """simple docstring"""
        directory = Path('src/transformers' )
        identifier = 'tokenization'
        self.analyze_directory(directory , identifier=identifier )
    def A ( self : str ):
        """simple docstring"""
        directory = Path('src/transformers' )
        identifier = 'configuration'
        self.analyze_directory(directory , identifier=identifier )
    def A ( self : Optional[int] ):
        """simple docstring"""
        directory = Path('src/transformers' )
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(directory , n_identifier=n_identifiers )
    def A ( self : Tuple ):
        """simple docstring"""
        directory = Path('docs/source' )
        ignore_files = ['favicon.ico']
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False )
| 28
|
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def _UpperCAmelCase ( file : Dict , sock : Optional[int] ) -> Union[str, Any]:
    # ===== initialization =====
    conn : Tuple = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f : Optional[Any] = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _lowerCamelCase : next(f )
    # ===== invoke =====
    send_file(filename="""mytext.txt""" , testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
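# Added note: with stacked @patch decorators the innermost patch is applied first,
# so the mocks arrive bottom-up: `file` comes from "builtins.open" and `sock`
# from "socket.socket".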
| 309
| 0
|
__UpperCAmelCase = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
__UpperCAmelCase = ['a', 'b', 'c', 'd', 'e']
def lowercase__ ( __snake_case : List[str] , __snake_case : str , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = start
# add current to visited
visited.append(__snake_case )
UpperCAmelCase_ : Optional[int] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
UpperCAmelCase_ : str = topological_sort(__snake_case , __snake_case , __snake_case )
# if all neighbors visited add current to sort
sort.append(__snake_case )
# if all vertices haven't been visited select a new one to visit
if len(__snake_case ) != len(__snake_case ):
for vertice in vertices:
if vertice not in visited:
UpperCAmelCase_ : Optional[Any] = topological_sort(__snake_case , __snake_case , __snake_case )
# return sort
return sort
if __name__ == "__main__":
__UpperCAmelCase = topological_sort('a', [], [])
    print(__UpperCAmelCase)
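# Added note: for the example graph above this prints ['c', 'd', 'e', 'b', 'a'],
# a dependencies-first (post-order) listing; reversing it yields a conventional
# topological order starting from 'a'.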
| 29
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester :
    def __init__( self , parent , batch_size=1_3 , image_size=3_0 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs ( self ):
        pixel_values : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels : List[str] = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config : Optional[Any] = self.get_config()
        return config, pixel_values, labels
    def get_config ( self ):
        return ViTMSNConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model ( self , config , pixel_values , labels ):
        model : List[Any] = ViTMSNModel(config=config )
        model.to(torch_device )
        model.eval()
        result : Optional[Any] = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification ( self , config , pixel_values , labels ):
        model : int = ViTMSNForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result : Optional[int] = model(pixel_values , labels=labels )
        print(f"""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
        print(f"""Labels: {labels}""" )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common ( self ):
        config_and_inputs : List[str] = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict : Any = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping : Optional[int] = (
        {"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning : Dict = False
    test_torchscript : Optional[Any] = False
    test_resize_embeddings : List[str] = False
    test_head_masking : Any = False
    def setUp ( self ):
        self.model_tester = ViTMSNModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMSNConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config ( self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
    def test_inputs_embeds ( self ):
        pass
    def test_model_common_attributes ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model : List[str] = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x : List[Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model : Optional[int] = model_class(config )
            signature : List[str] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names : Optional[Any] = [*signature.parameters.keys()]
            expected_arg_names : List[Any] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model ( self ):
        config_and_inputs : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification ( self ):
        config_and_inputs : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained ( self ):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model : Optional[int] = ViTMSNModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img ( ) -> Tuple:
    image : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest (unittest.TestCase ):
@cached_property
    def default_image_processor ( self ):
        return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head ( self ):
        torch.manual_seed(2 )
        model : Dict = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(torch_device )
        image_processor : Dict = self.default_image_processor
        image : Any = prepare_img()
        inputs : List[str] = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs : Dict = model(**inputs )
        # verify the logits
        expected_shape : Dict = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice : Tuple = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 309
| 0
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowercase__:
"""simple docstring"""
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any]=1_3 , SCREAMING_SNAKE_CASE_ : int=3_0 , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : str=3_2 , SCREAMING_SNAKE_CASE_ : Dict=2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE_ : List[Any]=3_7 , SCREAMING_SNAKE_CASE_ : Any="gelu" , SCREAMING_SNAKE_CASE_ : List[str]=0.1 , SCREAMING_SNAKE_CASE_ : Any=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_0 , SCREAMING_SNAKE_CASE_ : List[Any]=0.02 , SCREAMING_SNAKE_CASE_ : Dict=3 , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : int=2 , ) -> Any:
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = scope
lowercase_ = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
lowercase_ = (image_size // patch_size) ** 2
lowercase_ = num_patches + 2
def _lowercase ( self : Any ) -> Union[str, Any]:
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : Optional[int] ) -> str:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Optional[Any]:
lowercase_ = TFDeiTModel(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int ) -> List[Any]:
lowercase_ = TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE_ )
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
lowercase_ = self.type_sequence_label_size
lowercase_ = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE_ )
lowercase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE_ )
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self : Optional[Any] ) -> Dict:
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowercase__( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
a :str = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
a :Tuple = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
a :Any = False
a :Optional[int] = False
a :Tuple = False
a :int = False
def _lowercase ( self : Any ) -> Union[str, Any]:
lowercase_ = TFDeiTModelTester(self )
lowercase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=3_7 )
def _lowercase ( self : Dict ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def _lowercase ( self : List[str] ) -> str:
pass
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , tf.keras.layers.Dense ) )
def _lowercase ( self : Optional[int] ) -> List[str]:
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(SCREAMING_SNAKE_CASE_ )
lowercase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[str] ) -> Dict:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Any ) -> List[str]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : int ) -> List[str]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str]=False ) -> List[str]:
lowercase_ = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _lowercase ( self : int ) -> int:
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def a ( ):
'''simple docstring'''
lowercase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : Any ) -> Optional[int]:
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def _lowercase ( self : List[str] ) -> Any:
lowercase_ = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''tf''' )
# forward pass
lowercase_ = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
lowercase_ = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowercase_ = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 30
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool (PipelineTool ):
    default_checkpoint : List[Any] = """microsoft/speecht5_tts"""
    description : List[Any] = (
        """This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
        """text to read (in English) and returns a waveform object containing the sound."""
    )
    name : List[str] = """text_reader"""
    pre_processor_class : Optional[Any] = SpeechTaProcessor
    model_class : str = SpeechTaForTextToSpeech
    post_processor_class : int = SpeechTaHifiGan
    inputs : int = ["""text"""]
    outputs : int = ["""audio"""]
    def setup ( self ):
        if self.post_processor is None:
            self.post_processor = """microsoft/speecht5_hifigan"""
        super().setup()
    def encode ( self , text , speaker_embeddings=None ):
        inputs : Tuple = self.pre_processor(text=text , return_tensors="""pt""" , truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
            embeddings_dataset : List[str] = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
            speaker_embeddings = torch.tensor(embeddings_dataset[7_3_0_5]["""xvector"""] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward ( self , inputs ):
        with torch.no_grad():
            return self.model.generate_speech(**inputs )
    def decode ( self , outputs ):
        with torch.no_grad():
            return self.post_processor(outputs ).cpu().detach()
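# Added usage sketch (not from the source): PipelineTool subclasses are callable,
# running encode -> forward -> decode under the hood; a default speaker embedding
# (index 7305 of the xvector dataset) is loaded when none is supplied.
# tool = TextToSpeechTool()
# tool.setup()
# waveform = tool("Hello, world.")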
| 309
| 0
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[Any] = {
"""salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""",
}
class BlipaVisionConfig (PretrainedConfig ):
    '''simple docstring'''
    model_type = "blip_2_vision_model"
    def __init__( self : Optional[Any] , hidden_size : Union[str, Any]=1408 , intermediate_size : int=6144 , num_hidden_layers : Union[str, Any]=39 , num_attention_heads : List[str]=16 , image_size : List[str]=224 , patch_size : List[Any]=14 , hidden_act : int="gelu" , layer_norm_eps : Optional[Any]=0.00_001 , attention_dropout : str=0.0 , initializer_range : List[str]=1E-10 , qkv_bias : Dict=True , **kwargs : Optional[Any] , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained ( cls : Tuple , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs : List[str] ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type" ) == "blip-2":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class BlipaQFormerConfig (PretrainedConfig ):
    '''simple docstring'''
    model_type = "blip_2_qformer"
    def __init__( self : List[str] , vocab_size : Dict=30522 , hidden_size : Optional[Any]=768 , num_hidden_layers : List[str]=12 , num_attention_heads : int=12 , intermediate_size : List[Any]=3072 , hidden_act : List[Any]="gelu" , hidden_dropout_prob : List[str]=0.1 , attention_probs_dropout_prob : int=0.1 , max_position_embeddings : Any=512 , initializer_range : List[Any]=0.02 , layer_norm_eps : List[Any]=1E-12 , pad_token_id : Tuple=0 , position_embedding_type : int="absolute" , cross_attention_frequency : str=2 , encoder_hidden_size : List[Any]=1408 , **kwargs : Optional[int] , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained ( cls : Optional[Any] , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs : Optional[int] ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type" ) == "blip-2":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class BlipaConfig (PretrainedConfig ):
    '''simple docstring'''
    model_type = "blip-2"
    is_composition = True
    def __init__( self : str , vision_config : Optional[Any]=None , qformer_config : List[Any]=None , text_config : str=None , num_query_tokens : Optional[int]=32 , **kwargs : Union[str, Any] ):
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." )
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
        self.vision_config = BlipaVisionConfig(**vision_config )
        self.qformer_config = BlipaQFormerConfig(**qformer_config )
        text_model_type : str = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs ( cls : List[Any] , vision_config : BlipaVisionConfig , qformer_config : BlipaQFormerConfig , text_config : PretrainedConfig , **kwargs : Any , ):
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
    def to_dict ( self : Optional[Any] ):
        output : Optional[Any] = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
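# Added usage sketch (not from the source): a default composite config wires the
# Q-Former's cross-attention width to the vision tower's hidden size.
# config = BlipaConfig()
# assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size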
| 31
|
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image ( ) -> Tuple:
    url : List[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
    image : int = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return image
def create_rename_keys ( config : Any ) -> Dict:
    rename_keys : str = []
    # fmt: off
    # vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key ( dct : List[Any] , old : Optional[int] , new : List[Any] ) -> Optional[Any]:
    val : str = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias ( state_dict : Union[str, Any] , config : Tuple ) -> Tuple:
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias : Tuple = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' )
        v_bias : Optional[Any] = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' )
        # next, set bias in the state dict
        qkv_bias : int = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[f'visual_encoder.blocks.{i}.attn.qkv.bias'] = qkv_bias
def get_blipa_config ( model_name : Optional[Any] , eos_token_id : Union[str, Any] ) -> List[Any]:
    image_size : str = 3_64 if """coco""" in model_name else 2_24
    vision_config : str = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config : int = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config : Union[str, Any] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config : Optional[int] = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config : str = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    config : Dict = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
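# Added note: the COCO-fine-tuned checkpoints were trained on 364x364 inputs while
# the pretrained ones use 224x224, which is what the image-size switch above encodes.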
@torch.no_grad()
def convert_blipa_checkpoint ( model_name : Tuple , pytorch_dump_folder_path : List[Any]=None , push_to_hub : int=False ) -> List[str]:
_lowerCAmelCase : int = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
_lowerCAmelCase : List[Any] = tokenizer("""\n""" , add_special_tokens=_lowerCamelCase ).input_ids[0]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = BlipaForConditionalGeneration(_lowerCamelCase ).eval()
_lowerCAmelCase : Union[str, Any] = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
_lowerCAmelCase , _lowerCAmelCase : List[str] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_lowerCAmelCase : Dict = """cuda""" if torch.cuda.is_available() else """cpu"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = load_model_and_preprocess(
name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
_lowerCAmelCase : List[Any] = original_model.state_dict()
_lowerCAmelCase : Optional[int] = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_lowerCAmelCase : Tuple = state_dict.pop(_lowerCamelCase )
if key.startswith("""Qformer.bert""" ):
_lowerCAmelCase : List[Any] = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
_lowerCAmelCase : Optional[int] = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
_lowerCAmelCase : Dict = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
_lowerCAmelCase : Tuple = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
_lowerCAmelCase : List[Any] = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
_lowerCAmelCase : int = key.replace("""t5""" , """language""" )
_lowerCAmelCase : Tuple = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert len(_lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_lowerCAmelCase : Union[str, Any] = load_demo_image()
_lowerCAmelCase : Optional[int] = vis_processors["""eval"""](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
_lowerCAmelCase : List[str] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
# create processor
_lowerCAmelCase : Optional[int] = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
_lowerCAmelCase : Tuple = BlipaProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
_lowerCAmelCase : Any = processor(images=_lowerCamelCase , return_tensors="""pt""" ).pixel_values.to(_lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
_lowerCAmelCase : Optional[Any] = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
_lowerCAmelCase : Optional[Any] = hf_model(_lowerCamelCase , _lowerCamelCase ).logits
else:
_lowerCAmelCase : List[Any] = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
_lowerCAmelCase : Tuple = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
_lowerCAmelCase : Dict = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_lowerCAmelCase : Any = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_lowerCAmelCase : List[Any] = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase )
else:
# cast to same type
_lowerCAmelCase : Union[str, Any] = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_lowerCAmelCase : Optional[int] = """"""
_lowerCAmelCase : Union[str, Any] = tokenizer(_lowerCamelCase , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
_lowerCAmelCase : List[Any] = original_model.generate({"""image""": original_pixel_values} )
_lowerCAmelCase : Dict = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , _lowerCamelCase )
_lowerCAmelCase : int = input_ids.shape[1]
_lowerCAmelCase : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
_lowerCAmelCase : List[str] = [text.strip() for text in output_text]
print("""HF generation:""" , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'nielsr/{model_name}' )
hf_model.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
UpperCamelCase_ = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
UpperCamelCase_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 309
| 0
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
@staticmethod
@abstractmethod
def SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> Tuple:
raise NotImplementedError()
@abstractmethod
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
raise NotImplementedError()
| 32
|
'''simple docstring'''
import argparse
import os
import re
UpperCamelCase_ = """src/diffusers"""
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"""\[([^\]]+)\]""")
def get_indent ( _lowerCamelCase : List[Any] ) -> str:
    search : Dict = _re_indent.search(_lowerCamelCase )
    return "" if search is None else search.groups()[0]
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str]="" , _lowerCamelCase : str=None , _lowerCamelCase : List[Any]=None ) -> str:
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Tuple = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(_lowerCamelCase ):
index += 1
_lowerCAmelCase : List[Any] = ["""\n""".join(lines[:index] )]
else:
_lowerCAmelCase : List[str] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase : Union[str, Any] = [lines[index]]
index += 1
while index < len(_lowerCamelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_lowerCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(_lowerCamelCase ) )
if index < len(_lowerCamelCase ) - 1:
_lowerCAmelCase : Union[str, Any] = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : Dict = []
else:
blocks.append("""\n""".join(_lowerCamelCase ) )
_lowerCAmelCase : Tuple = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_lowerCamelCase ) > 0:
blocks.append("""\n""".join(_lowerCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCamelCase ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def ignore_underscore ( key : Optional[Any] ) -> Any:
    def _inner(_lowerCamelCase : Any ):
        return key(_lowerCamelCase ).lower().replace("""_""" , """""" )
    return _inner
def sort_objects ( objects : Union[str, Any] , key : Tuple=None ) -> Union[str, Any]:
    # If no key is provided, we use a noop.
    def noop(x : List[Any] ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants : Any = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes : Union[str, Any] = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions : Optional[Any] = [obj for obj in objects if not key(obj )[0].isupper()]
    key_fn : List[str] = ignore_underscore(key )
    return sorted(constants , key=key_fn ) + sorted(classes , key=key_fn ) + sorted(functions , key=key_fn )
def _UpperCAmelCase ( _lowerCamelCase : str ) -> str:
# This inner function sort imports between [ ].
def _replace(_lowerCamelCase : Union[str, Any] ):
_lowerCAmelCase : Optional[Any] = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
_lowerCAmelCase : List[str] = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : int = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(_lowerCamelCase )] ) + "]"
_lowerCAmelCase : Optional[int] = import_statement.split("""\n""" )
if len(_lowerCamelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowerCAmelCase : Dict = 2 if lines[1].strip() == """[""" else 1
_lowerCAmelCase : Tuple = [(i, _re_strip_line.search(_lowerCamelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_lowerCAmelCase : Tuple = sort_objects(_lowerCamelCase , key=lambda _lowerCamelCase : x[1] )
_lowerCAmelCase : Optional[Any] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_lowerCamelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowerCAmelCase : str = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowerCAmelCase : Tuple = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : Dict = keys[:-1]
_lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + """, """.join([f'"{k}"' for k in sort_objects(_lowerCamelCase )] )
return "\n".join(_lowerCamelCase )
else:
# Finally we have to deal with imports fitting on one line
_lowerCAmelCase : Dict = _re_bracket_content.sub(_replace , _lowerCamelCase )
return import_statement
def _UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]=True ) -> List[str]:
with open(_lowerCamelCase , """r""" ) as f:
_lowerCAmelCase : Optional[Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(
_lowerCamelCase , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_lowerCamelCase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowerCAmelCase : List[str] = main_blocks[block_idx]
_lowerCAmelCase : int = block.split("""\n""" )
# Get to the start of the imports.
_lowerCAmelCase : Any = 0
while line_idx < len(_lowerCamelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase )
else:
line_idx += 1
if line_idx >= len(_lowerCamelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowerCAmelCase : Any = """\n""".join(block_lines[line_idx:-1] )
_lowerCAmelCase : Tuple = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
_lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(_lowerCamelCase , indent_level=_lowerCamelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowerCAmelCase : List[Any] = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowerCAmelCase : Tuple = [(pattern.search(_lowerCamelCase ).groups()[0] if pattern.search(_lowerCamelCase ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowerCAmelCase : List[str] = [(i, key) for i, key in enumerate(_lowerCamelCase ) if key is not None]
_lowerCAmelCase : List[str] = [x[0] for x in sorted(_lowerCamelCase , key=lambda _lowerCamelCase : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : List[str] = []
for i in range(len(_lowerCamelCase ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
_lowerCAmelCase : Any = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(_lowerCamelCase )
count += 1
# And we put our main block back together with its first and last line.
_lowerCAmelCase : str = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(_lowerCamelCase ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(_lowerCamelCase , """w""" ) as f:
f.write("""\n""".join(_lowerCamelCase ) )
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any]=True ) -> Any:
_lowerCAmelCase : List[Any] = []
for root, _, files in os.walk(_lowerCamelCase ):
if "__init__.py" in files:
_lowerCAmelCase : List[Any] = sort_imports(os.path.join(_lowerCamelCase , """__init__.py""" ) , check_only=_lowerCamelCase )
if result:
_lowerCAmelCase : str = [os.path.join(_lowerCamelCase , """__init__.py""" )]
if len(_lowerCamelCase ) > 0:
raise ValueError(f'Would overwrite {len(_lowerCamelCase )} files, run `make style`.' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
UpperCamelCase_ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 309
| 0
|
"""simple docstring"""
from pathlib import Path
import cv2 as cva  # the OpenCV module is referenced as `cva` throughout this file
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    # Solve for the 2x3 affine matrix that maps pt1 onto pt2, then apply it to the image.
    matrix = cva.getAffineTransform(pt1, pt2)
    return cva.warpAffine(img, matrix, (rows, cols))
if __name__ == "__main__":
# read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
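
    # Quick verification idea (illustrative): getAffineTransform solves for the 2x3
    # matrix M that maps the three source points onto the three destination points, so
    #     np.hstack([pts1, np.ones((3, 1), np.float32)]) @ M.T   ~=   pts2
    # for M = cva.getAffineTransform(pts1, pts2); three non-collinear point pairs
    # determine an affine transform uniquely.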
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
UpperCamelCase_ = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: Optional[str] = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
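

# Minimal usage sketch (illustrative; the checkpoint name is one known RAG checkpoint):
# tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")   # loads both sub-tokenizers
# inputs = tokenizer("who wrote hamlet?", return_tensors="pt")          # routed to the question encoder
# answers = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)  # routed to the generator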
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
def __init__( self : str , lowercase : List[Any] , lowercase : Dict=2 , lowercase : str=32 , lowercase : Optional[Any]=16 , lowercase : Optional[Any]=3 , lowercase : Union[str, Any]=True , lowercase : List[Any]=True , lowercase : Optional[int]=32 , lowercase : Any=4 , lowercase : str=[0, 1, 2, 3] , lowercase : List[Any]=4 , lowercase : str=37 , lowercase : Optional[Any]="gelu" , lowercase : Tuple=0.1 , lowercase : Tuple=0.1 , lowercase : Union[str, Any]=0.02 , lowercase : int=3 , lowercase : int=[1, 384, 24, 24] , lowercase : str=True , lowercase : List[Any]=None , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = backbone_out_indices
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = backbone_featmap_shape
UpperCAmelCase = scope
UpperCAmelCase = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase = (image_size // patch_size) ** 2
UpperCAmelCase = num_patches + 1
def A ( self : int ):
'''simple docstring'''
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=lowercase , backbone_featmap_shape=self.backbone_featmap_shape , )
def A ( self : Optional[int] , lowercase : str , lowercase : Optional[Any] , lowercase : List[str] ):
'''simple docstring'''
UpperCAmelCase = DPTModel(config=lowercase )
model.to(lowercase )
model.eval()
UpperCAmelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[str] , lowercase : Tuple , lowercase : Optional[int] , lowercase : List[str] ):
'''simple docstring'''
UpperCAmelCase = self.num_labels
UpperCAmelCase = DPTForDepthEstimation(lowercase )
model.to(lowercase )
model.eval()
UpperCAmelCase = model(lowercase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def A ( self : int , lowercase : Dict , lowercase : Union[str, Any] , lowercase : int ):
'''simple docstring'''
UpperCAmelCase = self.num_labels
UpperCAmelCase = DPTForSemanticSegmentation(lowercase )
model.to(lowercase )
model.eval()
UpperCAmelCase = model(lowercase , labels=lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def A ( self : int ):
'''simple docstring'''
UpperCAmelCase = DPTModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )
def A ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def A ( self : List[str] ):
'''simple docstring'''
pass
def A ( self : int ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def A ( self : Any ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowercase )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase )
def A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*lowercase )
def A ( self : Any ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase )
def A ( self : List[str] ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = True
if model_class in get_values(lowercase ):
continue
UpperCAmelCase = model_class(lowercase )
model.to(lowercase )
model.train()
UpperCAmelCase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
UpperCAmelCase = model(**lowercase ).loss
loss.backward()
def A ( self : str ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = False
UpperCAmelCase = True
if model_class in get_values(lowercase ) or not model_class.supports_gradient_checkpointing:
continue
UpperCAmelCase = model_class(lowercase )
model.to(lowercase )
model.gradient_checkpointing_enable()
model.train()
UpperCAmelCase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
UpperCAmelCase = model(**lowercase ).loss
loss.backward()
def A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = _config_zero_init(lowercase )
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(config=lowercase )
# Skip the check for the backbone
UpperCAmelCase = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
UpperCAmelCase = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def A ( self : int ):
'''simple docstring'''
pass
@slow
def A ( self : Any ):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
UpperCAmelCase = DPTModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = '''add'''
with self.assertRaises(lowercase ):
UpperCAmelCase = DPTForDepthEstimation(lowercase )
def snake_case_ ():
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
UpperCAmelCase = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(lowercase )
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=lowercase , return_tensors='''pt''' ).to(lowercase )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**lowercase )
UpperCAmelCase = outputs.predicted_depth
# verify the predicted depth
UpperCAmelCase = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , lowercase )
UpperCAmelCase = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , lowercase , atol=1E-4 ) )
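
        # A common follow-up step (illustrative sketch, not part of this test): upsample
        # the predicted depth map back to the input image size for visualization.
        # prediction = torch.nn.functional.interpolate(
        #     predicted_depth.unsqueeze(1),   # (batch, 1, height, width)
        #     size=image.size[::-1],          # PIL size is (width, height)
        #     mode="bicubic",
        #     align_corners=False,
        # ).squeeze()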
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
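    # Quick sanity check (illustrative): the result should agree with Python's built-in
    # sorting on any list of integers, e.g.
    #     data = [8, 3, 2, 7, 4, 6, 8]
    #     expected = sorted(data)
    #     pigeonhole_sort(data)
    #     assert data == expected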
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
def lowerCamelCase ( self : str ):
return self._get_superresolution_dummy_components()
def lowerCamelCase ( self : Any , snake_case_ : Optional[Any] , snake_case_ : List[str]=0 ):
if str(snake_case_ ).startswith("""mps""" ):
snake_case__ : List[str] = torch.manual_seed(snake_case_ )
else:
snake_case__ : Optional[Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
snake_case__ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
snake_case__ : List[str] = floats_tensor((1, 3, 16, 16) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
snake_case__ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCamelCase ( self : Optional[Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCamelCase ( self : int ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCamelCase ( self : Optional[int] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCamelCase ( self : List[Any] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCamelCase ( self : Dict ):
self._test_save_load_local()
def lowerCamelCase ( self : Union[str, Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the squares of the first n natural numbers."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }')
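    # Brute-force cross-check (illustrative) for a small n:
    #     assert solution(10) == sum(range(1, 11)) ** 2 - sum(i * i for i in range(1, 11))  # 3025 - 385 = 2640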
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_focalnet"""] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
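

# The boilerplate above defers heavy imports until first attribute access. A minimal
# self-contained sketch of the same idea (hypothetical names, not the real _LazyModule):
import importlib
import types


class MiniLazyModule(types.ModuleType):
    """Resolve attributes from the submodules listed in `import_structure` on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)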
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
'''simple docstring'''
def __init__( self ) -> int:
lowerCAmelCase__ : Optional[int] = {}
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=1 ) -> Dict:
if self.graph.get(__UpperCAmelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
lowerCAmelCase__ : List[str] = [[w, v]]
if not self.graph.get(__UpperCAmelCase ):
lowerCAmelCase__ : Dict = []
def UpperCAmelCase_ ( self ) -> Tuple:
return list(self.graph )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Union[str, Any]:
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase=-2 ,__UpperCAmelCase=-1 ) -> Optional[Any]:
if s == d:
return []
lowerCAmelCase__ : Any = []
lowerCAmelCase__ : Tuple = []
if s == -2:
lowerCAmelCase__ : Any = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase__ : Any = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase__ : List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCAmelCase ) != 0:
lowerCAmelCase__ : Any = stack[len(__UpperCAmelCase ) - 1]
else:
lowerCAmelCase__ : int = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return visited
def UpperCAmelCase_ ( self ,__UpperCAmelCase=-1 ) -> List[Any]:
if c == -1:
lowerCAmelCase__ : Tuple = floor(random() * 1_0000 ) + 10
for i in range(__UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowerCAmelCase__ : int = floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCAmelCase ,__UpperCAmelCase ,1 )
def UpperCAmelCase_ ( self ,__UpperCAmelCase=-2 ) -> int:
lowerCAmelCase__ : List[Any] = deque()
lowerCAmelCase__ : Tuple = []
if s == -2:
lowerCAmelCase__ : Optional[int] = list(self.graph )[0]
d.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
while d:
lowerCAmelCase__ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
lowerCAmelCase__ : Optional[Any] = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
return len(self.graph[u] )
def UpperCAmelCase_ ( self ,__UpperCAmelCase=-2 ) -> Optional[Any]:
lowerCAmelCase__ : Tuple = []
lowerCAmelCase__ : Dict = []
if s == -2:
lowerCAmelCase__ : List[str] = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = s
lowerCAmelCase__ : Dict = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase__ : List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase__ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__UpperCAmelCase ) != 0:
lowerCAmelCase__ : int = stack[len(__UpperCAmelCase ) - 1]
else:
lowerCAmelCase__ : List[str] = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return sorted_nodes
def UpperCAmelCase_ ( self ) -> List[str]:
lowerCAmelCase__ : Union[str, Any] = []
lowerCAmelCase__ : List[str] = []
lowerCAmelCase__ : List[Any] = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = -2
lowerCAmelCase__ : List[str] = []
lowerCAmelCase__ : Optional[int] = s
lowerCAmelCase__ : Union[str, Any] = False
lowerCAmelCase__ : Dict = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase__ : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCAmelCase__ : Any = len(__UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase__ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCAmelCase__ : Dict = True
if len(__UpperCAmelCase ) != 0:
lowerCAmelCase__ : Optional[int] = stack[len(__UpperCAmelCase ) - 1]
else:
lowerCAmelCase__ : List[str] = False
indirect_parents.append(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = s
lowerCAmelCase__ : Dict = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return list(__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
lowerCAmelCase__ : Optional[int] = []
lowerCAmelCase__ : Tuple = []
lowerCAmelCase__ : Dict = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
lowerCAmelCase__ : Tuple = -2
lowerCAmelCase__ : Tuple = []
lowerCAmelCase__ : List[Any] = s
lowerCAmelCase__ : str = False
lowerCAmelCase__ : int = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase__ : Tuple = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCAmelCase__ : List[str] = len(__UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase__ : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCAmelCase__ : List[str] = True
if len(__UpperCAmelCase ) != 0:
lowerCAmelCase__ : Union[str, Any] = stack[len(__UpperCAmelCase ) - 1]
else:
lowerCAmelCase__ : Optional[Any] = False
indirect_parents.append(__UpperCAmelCase )
lowerCAmelCase__ : str = s
lowerCAmelCase__ : List[str] = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return False
def UpperCAmelCase_ ( self ,__UpperCAmelCase=-2 ,__UpperCAmelCase=-1 ) -> int:
lowerCAmelCase__ : Optional[Any] = time()
self.dfs(__UpperCAmelCase ,__UpperCAmelCase )
lowerCAmelCase__ : Dict = time()
return end - begin
def UpperCAmelCase_ ( self ,__UpperCAmelCase=-2 ) -> Dict:
lowerCAmelCase__ : str = time()
self.bfs(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = time()
return end - begin
class Graph:
'''simple docstring'''
def __init__( self ) -> Optional[int]:
lowerCAmelCase__ : int = {}
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=1 ) -> Optional[int]:
        # check if u exists
if self.graph.get(__UpperCAmelCase ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
lowerCAmelCase__ : Tuple = [[w, v]]
# add the other way
if self.graph.get(__UpperCAmelCase ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
lowerCAmelCase__ : List[Any] = [[w, u]]
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Tuple:
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__UpperCAmelCase )
# the other way round
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase=-2 ,__UpperCAmelCase=-1 ) -> List[str]:
if s == d:
return []
lowerCAmelCase__ : Union[str, Any] = []
lowerCAmelCase__ : Optional[int] = []
if s == -2:
lowerCAmelCase__ : str = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
lowerCAmelCase__ : Any = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase__ : Optional[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase__ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCAmelCase ) != 0:
lowerCAmelCase__ : Optional[int] = stack[len(__UpperCAmelCase ) - 1]
else:
lowerCAmelCase__ : Optional[Any] = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return visited
def UpperCAmelCase_ ( self ,__UpperCAmelCase=-1 ) -> Dict:
if c == -1:
lowerCAmelCase__ : Dict = floor(random() * 1_0000 ) + 10
for i in range(__UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowerCAmelCase__ : Any = floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCAmelCase ,__UpperCAmelCase ,1 )
def UpperCAmelCase_ ( self ,__UpperCAmelCase=-2 ) -> Tuple:
lowerCAmelCase__ : Dict = deque()
lowerCAmelCase__ : Optional[int] = []
if s == -2:
lowerCAmelCase__ : Optional[int] = list(self.graph )[0]
d.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
while d:
lowerCAmelCase__ : Any = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
return len(self.graph[u] )
def UpperCAmelCase_ ( self ) -> Any:
lowerCAmelCase__ : Dict = []
lowerCAmelCase__ : List[str] = []
lowerCAmelCase__ : Optional[int] = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
lowerCAmelCase__ : int = -2
lowerCAmelCase__ : int = []
lowerCAmelCase__ : List[str] = s
lowerCAmelCase__ : Tuple = False
lowerCAmelCase__ : Dict = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase__ : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCAmelCase__ : int = len(__UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase__ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCAmelCase__ : Optional[Any] = True
if len(__UpperCAmelCase ) != 0:
lowerCAmelCase__ : Optional[Any] = stack[len(__UpperCAmelCase ) - 1]
else:
lowerCAmelCase__ : int = False
indirect_parents.append(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = s
lowerCAmelCase__ : List[str] = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return list(__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Dict:
lowerCAmelCase__ : Union[str, Any] = []
lowerCAmelCase__ : str = []
lowerCAmelCase__ : Optional[Any] = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
lowerCAmelCase__ : Dict = -2
lowerCAmelCase__ : Optional[Any] = []
lowerCAmelCase__ : Any = s
lowerCAmelCase__ : Optional[int] = False
lowerCAmelCase__ : Dict = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase__ : Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCAmelCase__ : List[Any] = len(__UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase__ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCAmelCase__ : Optional[int] = True
if len(__UpperCAmelCase ) != 0:
lowerCAmelCase__ : str = stack[len(__UpperCAmelCase ) - 1]
else:
lowerCAmelCase__ : Tuple = False
indirect_parents.append(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = s
lowerCAmelCase__ : int = ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return False
def UpperCAmelCase_ ( self ) -> str:
return list(self.graph )
def UpperCAmelCase_ ( self ,__UpperCAmelCase=-2 ,__UpperCAmelCase=-1 ) -> List[str]:
lowerCAmelCase__ : Tuple = time()
self.dfs(__UpperCAmelCase ,__UpperCAmelCase )
lowerCAmelCase__ : int = time()
return end - begin
def UpperCAmelCase_ ( self ,__UpperCAmelCase=-2 ) -> int:
lowerCAmelCase__ : Optional[int] = time()
self.bfs(__UpperCAmelCase )
lowerCAmelCase__ : Any = time()
return end - begin
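

# Both classes above implement the same adjacency-list pattern; a compact, readable
# reference sketch of its core operations (illustrative names, not from this file):
class SimpleDiGraph:
    def __init__(self):
        self.adj = {}  # node -> list of [weight, neighbor] pairs

    def add_edge(self, u, v, w=1):
        self.adj.setdefault(u, []).append([w, v])
        self.adj.setdefault(v, [])  # make sure v exists even with no out-edges

    def dfs(self, start):
        visited, stack, order = set(), [start], []
        while stack:
            node = stack.pop()
            if node in visited:
                continue
            visited.add(node)
            order.append(node)
            stack.extend(v for _, v in reversed(self.adj[node]))
        return order

    def bfs(self, start):
        visited, queue, order = {start}, deque([start]), []
        while queue:
            node = queue.popleft()
            order.append(node)
            for _, v in self.adj[node]:
                if v not in visited:
                    visited.add(v)
                    queue.append(v)
        return order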
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_dpt"""] = ["""DPTFeatureExtractor"""]
    _import_structure["""image_processing_dpt"""] = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_dpt"""] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase_ : Dict = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCAmelCase_ : Any = {
'''allenai/led-base-16384''': 1_63_84,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , __lowerCamelCase : Dict=None , __lowerCamelCase : Any=None , __lowerCamelCase : int=None , __lowerCamelCase : Any="replace" , __lowerCamelCase : Union[str, Any]="<s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : Tuple="</s>" , __lowerCamelCase : List[Any]="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : Any="<pad>" , __lowerCamelCase : Tuple="<mask>" , __lowerCamelCase : List[str]=False , __lowerCamelCase : List[Any]=True , **__lowerCamelCase : str , ):
super().__init__(
__lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , )
UpperCamelCase :Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __lowerCamelCase ) != add_prefix_space:
UpperCamelCase :Union[str, Any] = getattr(__lowerCamelCase , pre_tok_state.pop("""type""" ) )
UpperCamelCase :Optional[Any] = add_prefix_space
UpperCamelCase :Tuple = pre_tok_class(**__lowerCamelCase )
UpperCamelCase :Optional[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCamelCase :str = """post_processor"""
UpperCamelCase :List[str] = getattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase )
if tokenizer_component_instance:
UpperCamelCase :Tuple = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the `post_processor_class` object
if "sep" in state:
UpperCamelCase :int = tuple(state["""sep"""] )
if "cls" in state:
UpperCamelCase :Optional[int] = tuple(state["""cls"""] )
UpperCamelCase :Optional[Any] = False
if state.get("""add_prefix_space""" , __lowerCamelCase ) != add_prefix_space:
UpperCamelCase :Optional[Any] = add_prefix_space
UpperCamelCase :Union[str, Any] = True
if state.get("""trim_offsets""" , __lowerCamelCase ) != trim_offsets:
UpperCamelCase :Tuple = trim_offsets
UpperCamelCase :Tuple = True
if changes_to_apply:
UpperCamelCase :Tuple = getattr(__lowerCamelCase , state.pop("""type""" ) )
UpperCamelCase :int = component_class(**__lowerCamelCase )
setattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _A ( self : Dict ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _A ( self : Dict , __lowerCamelCase : List[str] ):
UpperCamelCase :Dict = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else value
UpperCamelCase :List[Any] = value
def _A ( self : Union[str, Any] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Optional[int] ):
UpperCamelCase :Optional[int] = kwargs.get("""is_split_into_words""" , __lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def _A ( self : Tuple , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : str ):
UpperCamelCase :Tuple = kwargs.get("""is_split_into_words""" , __lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def _A ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
UpperCamelCase :List[str] = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
def _A ( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]=None ):
UpperCamelCase :List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _A ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
UpperCamelCase :Dict = [self.sep_token_id]
UpperCamelCase :List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _A ( self : Tuple , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ):
UpperCamelCase :List[Any] = super()._pad(
encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
UpperCamelCase :List[Any] = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCamelCase :str = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
UpperCamelCase :Union[str, Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(__lowerCamelCase )
if needs_to_be_padded:
UpperCamelCase :Optional[int] = len(__lowerCamelCase ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCamelCase :Optional[Any] = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
UpperCamelCase :Union[str, Any] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
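

# The padding rule above in isolation (illustrative): -1 marks padded positions, since
# 0 already means "local attention" in a LED global attention mask.
#     mask, target_len = [1, 0, 0], 5
#     mask + [-1] * (target_len - len(mask))   # right padding -> [1, 0, 0, -1, -1]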
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    """Applies the rectified linear unit element-wise: max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
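    # The (sub)gradient is just as easy to vectorize; an illustrative companion:
    #     def relu_derivative(vector: np.ndarray) -> np.ndarray:
    #         return (np.asarray(vector) > 0).astype(float)
    #     print(relu_derivative([-1, 0, 5]))  # --> [0. 0. 1.]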
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    """Converts the given 32-char string to little-endian, in groups of 8 chars."""
    if len(string_aa) != 32:
        raise ValueError('Input must be of length 32')

    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Converts the given non-negative integer to little-endian hex."""
    if i < 0:
        raise ValueError('Input must be non-negative')

    hex_rep = format(i, '08x')[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8')
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Pads the message out to 512-bit blocks, appending the original bit length."""
    bit_string = b''
    for char in message:
        bit_string += format(char, '08b').encode('utf-8')
    start_len = format(len(bit_string), '064b').encode('utf-8')

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Splits the bit string into blocks of 16 32-bit words each."""
    if len(bit_string) % 512 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512')

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_aa(i: int) -> int:
    """Flips every bit of the given 32-bit integer."""
    if i < 0:
        raise ValueError('Input must be non-negative')

    i_str = format(i, '032b')
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_aa(a: int, b: int) -> int:
    """Adds two numbers modulo 2**32."""
    return (a + b) % 2**32
def left_rotate_aa(i: int, shift: int) -> int:
    """Rotates the bits of a 32-bit integer left by the given amount."""
    if i < 0:
        raise ValueError('Input must be non-negative')
    if shift < 0:
        raise ValueError('Shift must be non-negative')
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Returns the 32-char hex MD5 digest of the given message."""
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    aa = 0x67452301
    ba = 0xEFCDAB89
    ca = 0x98BADCFE
    da = 0x10325476
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-bit words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))

        # Add hashed chunk to running total
        aa = sum_aa(aa, a)
        ba = sum_aa(ba, b)
        ca = sum_aa(ca, c)
        da = sum_aa(da, d)

    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
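
    # Cross-check idea (illustrative): the pure-Python digest above can be validated
    # against the standard library on arbitrary inputs:
    #     import hashlib
    #     assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode("utf-8")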
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    """configuration_mobilebert""": [
        """MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """MobileBertConfig""",
        """MobileBertOnnxConfig""",
    ],
    """tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_mobilebert_fast"""] = ["""MobileBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mobilebert"""] = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_mobilebert"""] = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            """num_train_timesteps""": 1_1_0_0,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """noise_sampler_seed""": 0,
        }
        config.update(**kwargs)
        return config
def __UpperCamelCase ( self ):
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def __UpperCamelCase ( self ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=snake_case_ , beta_end=snake_case_ )
def __UpperCamelCase ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=snake_case_ )
def __UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.scheduler_classes[0]
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : Any = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : Tuple = self.dummy_model()
_lowerCAmelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : Optional[Any] = sample.to(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Union[str, Any] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : Union[str, Any] = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = output.prev_sample
_lowerCAmelCase : List[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1E-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.scheduler_classes[0]
_lowerCAmelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
_lowerCAmelCase : Dict = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : int = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : int = sample.to(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : List[str] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : List[Any] = model(snake_case_ , snake_case_ )
_lowerCAmelCase : str = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : int = output.prev_sample
_lowerCAmelCase : str = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Optional[int] = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1E-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1E-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1E-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1E-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1E-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
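# Minimal standalone sketch of the denoising loop the tests above exercise.
# `model` is assumed to map (sample, t) -> a noise prediction; it is a placeholder,
# not a diffusers API:
#
#   scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
#   scheduler.set_timesteps(10)
#   sample = initial_noise * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_output = model(scheduler.scale_model_input(sample, t), t)
#       sample = scheduler.step(model_output, t, sample).prev_sample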
| 309
| 0
|
'''simple docstring'''
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)
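# e.g. greatest_common_divisor(4, 6) == 2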
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]
    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)
    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)
    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch

        return encrypted
    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))
    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch

        return decrypted
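# Round-trip sketch with an assumed 2x2 key (output values not verified here):
#
#   hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
#   ciphertext = hc.encrypt("testing hill cipher")
#   hc.decrypt(ciphertext)  # plaintext, padded to a multiple of break_key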
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 41
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""YituTech/conv-bert-base""": 5_12,
"""YituTech/conv-bert-medium-small""": 5_12,
"""YituTech/conv-bert-small""": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
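# Minimal usage sketch (assumes Hub access; checkpoint id from the map above):
#
#   tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   enc = tokenizer("Hello world")
#   enc["input_ids"]  # [CLS] ... [SEP] ids built by build_inputs_with_special_tokens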
| 309
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"YituTech/conv-bert-base": 512,
"YituTech/conv-bert-medium-small": 512,
"YituTech/conv-bert-small": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        """simple docstring"""
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 42
|
'''simple docstring'''
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 309
| 0
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
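    # Example invocation (paths are placeholders):
    #   python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
    #       --checkpoint_path /path/to/unispeech_checkpoint.pt \
    #       --pytorch_dump_folder_path ./unispeech-hf \
    #       --dict_path /path/to/dict.json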
| 43
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
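# Sketch of the effect (module path assumed): `import transformers.models.groupvit as gv;
# gv.GroupViTModel` resolves through _LazyModule on first attribute access, so torch/TF
# are only imported when the corresponding class is actually used; the TYPE_CHECKING
# branch keeps static analyzers happy without that runtime cost.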
| 309
| 0
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
    def test_save_load_fast_init_from_base(self):
        pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
# slicing logits[0, 0, 0:30]
# fmt: off
        EXPECTED_SLICE = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,])
# fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
# slicing logits[0, 0, 0:30]
# fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3])
# fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
    def test_model_13b_chat_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
# slicing logits[0, 0, 0:30]
# fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3])
# fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-2, rtol=1e-2)
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]], dtype=torch.float32)
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
# fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2])
# fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
@unittest.skip("""Model is curently gated""" )
@slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False)

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
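# RoPE scaling config sketch, matching the keys used in test_model_rope_scaling above:
#   config.rope_scaling = {"type": "linear", "factor": 10.0}  # or {"type": "dynamic", ...}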
| 44
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPosition = tuple[int, int]
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other):
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False
    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]
    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent, ))
        return successors
    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
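# Minimal one-directional usage sketch on the module-level grid:
#   path = AStar((0, 0), (len(grid) - 1, len(grid[0]) - 1)).search()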
class BidirectionalAStar:
    def __init__(self, start, goal):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 309
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1_000,
            "beta_schedule": "linear",
            "beta_start": 0.0_0085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.619_9778, 0.6398_4406, 0.4614_5785, 0.6294_4984, 0.562_2215, 0.4730_6132, 0.4744_1456, 0.460_7606, 0.4871_9263])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
expected_image = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
init_image = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
prompt = '''A red cartoon frog, 4k'''
pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16 )
pipe_prior.to(torch_device )
pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.float16 )
pipeline = pipeline.to(torch_device )
pipeline.set_progress_bar_config(disable=None )
generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
image_emb , zero_image_emb = pipe_prior(
prompt , generator=generator , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
output = pipeline(
image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
image = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(image , expected_image )
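# The helper used above is imported from the shared pipeline test utilities in
# the real suite; below is a minimal sketch of what it checks, assuming the
# library's default threshold of 10 mean-absolute-difference in uint8 space.
import numpy as np


def assert_mean_pixel_difference(image, expected_image, expected_max_diff=10):
    # Compare two images by the average absolute per-pixel difference.
    image = np.asarray(image, dtype=np.float32)
    expected_image = np.asarray(expected_image, dtype=np.float32)
    avg_diff = np.abs(image - expected_image).mean()
    assert avg_diff < expected_max_diff, f"Error image deviates {avg_diff} pixels on average"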
'''simple docstring'''


def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
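# Sanity checks for the greedy routine above (toy values): greedy is optimal
# for canonical coin systems like the Indian denominations, but it can fail
# for non-canonical ones — e.g. with [1, 3, 4] and value 6 it returns
# [4, 1, 1] even though [3, 3] uses fewer coins.
assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987") == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2,
]
assert find_minimum_change([1, 3, 4], "6") == [4, 1, 1]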
"""simple docstring"""
import math
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
return math.pow(SCREAMING_SNAKE_CASE , 2 ) - a
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
return 2 * x
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
lowerCAmelCase = 2.0
while start <= a:
lowerCAmelCase = math.pow(SCREAMING_SNAKE_CASE , 2 )
return start
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int = 99_99 , SCREAMING_SNAKE_CASE : float = 0.00_00_00_00_00_00_01 ):
'''simple docstring'''
if a < 0:
raise ValueError("""math domain error""" )
lowerCAmelCase = get_initial_point(SCREAMING_SNAKE_CASE )
for _ in range(SCREAMING_SNAKE_CASE ):
lowerCAmelCase = value
lowerCAmelCase = value - fx(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) / fx_derivative(SCREAMING_SNAKE_CASE )
if abs(prev_value - value ) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod()
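# Quick usage sketch for the Newton iteration above; the reference values use
# the standard library, and the error bound is illustrative.
import math

for a in (2.0, 10.0, 144.0):
    assert abs(square_root_iterative(a) - math.sqrt(a)) < 1e-9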
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
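# Illustrative use of the lazy module above: importing the class is what
# finally triggers the heavy torch-backed import (the checkpoint names below
# are examples, not requirements of the API).
from transformers import EncoderDecoderModel

model = EncoderDecoderModel.from_encoder_decoder_pretrained(
    "bert-base-uncased", "bert-base-uncased"  # example encoder/decoder checkpoints
)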
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , scope=None , range_bbox=1000 , ) -> Optional[int]:
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.scope = scope
self.range_bbox = range_bbox
def A ( self : int ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_SCREAMING_SNAKE_CASE =bbox[i, j, 3]
_SCREAMING_SNAKE_CASE =bbox[i, j, 1]
_SCREAMING_SNAKE_CASE =t
if bbox[i, j, 2] < bbox[i, j, 0]:
_SCREAMING_SNAKE_CASE =bbox[i, j, 2]
_SCREAMING_SNAKE_CASE =bbox[i, j, 0]
_SCREAMING_SNAKE_CASE =t
_SCREAMING_SNAKE_CASE =None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_SCREAMING_SNAKE_CASE =None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE =self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def A ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def A ( self : int , _a : Any , _a : str , _a : Union[str, Any] , _a : Any , _a : Tuple , _a : List[Any] , _a : Dict , ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =LiltModel(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , bbox=_a , attention_mask=_a , token_type_ids=_a )
_SCREAMING_SNAKE_CASE =model(_a , bbox=_a , token_type_ids=_a )
_SCREAMING_SNAKE_CASE =model(_a , bbox=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A ( self : Optional[int] , _a : Tuple , _a : List[str] , _a : Optional[Any] , _a : Any , _a : Tuple , _a : Any , _a : Optional[int] , ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =LiltForTokenClassification(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(
_a , bbox=_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : Tuple , _a : str , _a : List[str] , _a : Optional[Any] , _a : Union[str, Any] , _a : Dict , _a : List[Any] , _a : str , ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =LiltForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(
_a , bbox=_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : Dict ) -> List[str]:
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    bbox,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
) = config_and_inputs
inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class A__ ( A__ , A__ , A__ , unittest.TestCase ):
A__ = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
A__ = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ = False
A__ = False
def A ( self : Tuple , _a : Union[str, Any] , _a : int , _a : List[Any] , _a : Optional[int] , _a : int ) -> Optional[Any]:
'''simple docstring'''
return True
def A ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =LiltModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , hidden_size=37 )
def A ( self : Dict ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def A ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_SCREAMING_SNAKE_CASE =type
self.model_tester.create_and_check_model(*_a )
def A ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
def A ( self : List[Any] ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
@slow
def A ( self : Optional[int] ) -> Dict:
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =LiltModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
@slow
class A__ ( unittest.TestCase ):
def A ( self : Dict ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(_a )
_SCREAMING_SNAKE_CASE =torch.tensor([[1, 2]] , device=_a )
_SCREAMING_SNAKE_CASE =torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_a )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(input_ids=_a , bbox=_a )
_SCREAMING_SNAKE_CASE =torch.Size([1, 2, 768] )
_SCREAMING_SNAKE_CASE =torch.tensor(
[[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=_a , )
self.assertTrue(outputs.last_hidden_state.shape , _a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _a , atol=1e-3 ) )
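# A minimal standalone inference sketch mirroring the integration test above
# (same checkpoint and toy inputs; one (x0, y0, x1, y1) box per token):
import torch
from transformers import LiltModel

model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
input_ids = torch.tensor([[1, 2]])
bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])
with torch.no_grad():
    outputs = model(input_ids=input_ids, bbox=bbox)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 2, 768])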
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = """laion/clap-htsat-unfused"""
_lowerCAmelCase : int = tempfile.mkdtemp()
def __UpperCamelCase ( self , **snake_case_ ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self , **snake_case_ ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = self.get_feature_extractor()
_lowerCAmelCase : Union[str, Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase : int = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
_lowerCAmelCase : Dict = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_feature_extractor()
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Union[str, Any] = floats_list((3, 1_0_0_0) )
_lowerCAmelCase : List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
_lowerCAmelCase : Optional[Any] = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_feature_extractor()
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Tuple = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Union[str, Any] = """This is a test string"""
_lowerCAmelCase : Union[str, Any] = processor(text=snake_case_ )
_lowerCAmelCase : Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = self.get_feature_extractor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[Any] = processor.batch_decode(snake_case_ )
_lowerCAmelCase : Dict = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.get_feature_extractor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
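# Usage sketch for the processor exercised above (checkpoint taken from the
# setUp; the audio array is a random placeholder, not real data):
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
audio = np.random.rand(48_000).astype(np.float32)
inputs = processor(text="a sound of a cat", audios=audio, return_tensors="pt")
print(sorted(inputs.keys()))  # tokenizer fields plus feature-extractor fields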
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class UpperCamelCase__ :
'''simple docstring'''
pass
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/m2m100_418M""": 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class a_ (_a ):
vocab_files_names = VOCAB_FILES_NAMES
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
model_input_names = ["""input_ids""", """attention_mask"""]
prefix_tokens: List[int] = []
suffix_tokens: List[int] = []
def __init__( self , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<pad>" , snake_case_="<unk>" , snake_case_="m2m100" , snake_case_ = None , snake_case_=8 , **snake_case_ , ):
_lowerCAmelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCAmelCase : Optional[Any] = language_codes
_lowerCAmelCase : Tuple = FAIRSEQ_LANGUAGE_CODES[language_codes]
_lowerCAmelCase : str = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
_lowerCAmelCase : int = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(snake_case_ )
for lang_code in fairseq_language_code
if self.get_lang_token(snake_case_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=snake_case_ , tgt_lang=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , language_codes=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Optional[int] = vocab_file
_lowerCAmelCase : Any = load_json(snake_case_ )
_lowerCAmelCase : str = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Union[str, Any] = spm_file
_lowerCAmelCase : Tuple = load_spm(snake_case_ , self.sp_model_kwargs )
_lowerCAmelCase : int = len(self.encoder )
_lowerCAmelCase : Union[str, Any] = {
self.get_lang_token(snake_case_ ): self.encoder_size + i for i, lang_code in enumerate(snake_case_ )
}
_lowerCAmelCase : List[str] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(snake_case_ )}
_lowerCAmelCase : Optional[Any] = {v: k for k, v in self.lang_token_to_id.items()}
_lowerCAmelCase : Any = src_lang if src_lang is not None else """en"""
_lowerCAmelCase : Optional[int] = tgt_lang
self.cur_lang_id = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_lowerCAmelCase : List[Any] = num_madeup_words
@property
def __UpperCamelCase ( self ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __UpperCamelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase ( self , snake_case_ ):
return self.sp_model.encode(snake_case_ , out_type=str )
def __UpperCamelCase ( self , snake_case_ ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(snake_case_ , self.encoder[self.unk_token] )
def __UpperCamelCase ( self , snake_case_ ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(snake_case_ , self.unk_token )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = []
_lowerCAmelCase : Optional[int] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case_ ) + token
_lowerCAmelCase : Optional[Any] = []
else:
current_sub_tokens.append(snake_case_ )
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
_lowerCAmelCase : List[Any] = [1] * len(self.prefix_tokens )
_lowerCAmelCase : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case_ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case_ )) + ([0] * len(snake_case_ )) + suffix_ones
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_lowerCAmelCase : int = self.__dict__.copy()
_lowerCAmelCase : str = None
return state
def __setstate__( self , snake_case_ ):
_lowerCAmelCase : List[str] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : str = {}
_lowerCAmelCase : str = load_spm(self.spm_file , self.sp_model_kwargs )
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Dict = Path(snake_case_ )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , snake_case_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , snake_case_ )
elif not os.path.isfile(self.spm_file ):
with open(snake_case_ , """wb""" ) as fi:
_lowerCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (str(snake_case_ ), str(snake_case_ ))
def __UpperCamelCase ( self , snake_case_ , snake_case_ = "en" , snake_case_ = None , snake_case_ = "ro" , **snake_case_ , ):
_lowerCAmelCase : Union[str, Any] = src_lang
_lowerCAmelCase : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(snake_case_ , snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_lowerCAmelCase : Dict = src_lang
_lowerCAmelCase : str = self(snake_case_ , add_special_tokens=snake_case_ , **snake_case_ )
_lowerCAmelCase : Union[str, Any] = self.get_lang_id(snake_case_ )
_lowerCAmelCase : Tuple = tgt_lang_id
return inputs
def __UpperCamelCase ( self ):
self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase ( self ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def set_src_lang_special_tokens( self , src_lang ):
lang_token = self.get_lang_token(src_lang )
self.cur_lang_id = self.lang_token_to_id[lang_token]
self.prefix_tokens = [self.cur_lang_id]
self.suffix_tokens = [self.eos_token_id]
def set_tgt_lang_special_tokens( self , tgt_lang ):
lang_token = self.get_lang_token(tgt_lang )
self.cur_lang_id = self.lang_token_to_id[lang_token]
self.prefix_tokens = [self.cur_lang_id]
self.suffix_tokens = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
return self.lang_code_to_token[lang]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[str] = self.get_lang_token(snake_case_ )
return self.lang_token_to_id[lang_token]
def _UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
_lowerCAmelCase : Optional[Any] = sentencepiece.SentencePieceProcessor(**_lowerCamelCase )
spm.Load(str(_lowerCamelCase ) )
return spm
def _UpperCAmelCase ( _lowerCamelCase : str ) -> Union[Dict, List]:
with open(_lowerCamelCase , """r""" ) as f:
return json.load(_lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : str ) -> None:
with open(_lowerCamelCase , """w""" ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase , indent=2 )
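# End-to-end translation sketch using the tokenizer defined above (public
# M2M100 checkpoint; the example sentence is arbitrary):
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
inputs = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))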
def counting_sort(collection):
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
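# Counting sort runs in O(n + k) time and space, where k = coll_max - coll_min + 1;
# the backwards placement loop is what makes the sort stable. Quick checks:
assert counting_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert counting_sort([-2, -5, -45]) == [-45, -5, -2]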
'''simple docstring'''
from collections.abc import Callable

import numpy as np


def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
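# Usage sketch: integrate y' = y with y(0) = 1 up to x = 1; explicit Euler has
# O(step_size) global error, so the endpoint approaches e = 2.71828... slowly.
ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(ys[-1])  # ~2.7048 with step_size = 0.01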
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class lowerCAmelCase :
def __init__( self , parent , vocab_size=99 , batch_size=13 , encoder_seq_length=7 , decoder_seq_length=9 , is_training=True , use_attention_mask=True , use_labels=False , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , d_ff=37 , relative_attention_num_buckets=8 , dropout_rate=0.1 , initializer_factor=0.0_0_2 , eos_token_id=1 , pad_token_id=0 , decoder_start_token_id=0 , scope=None , decoder_layers=None , ) -> Union[str, Any]:
self.parent = parent
self.batch_size = batch_size
self.encoder_seq_length = encoder_seq_length
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.d_ff = d_ff
self.relative_attention_num_buckets = relative_attention_num_buckets
self.dropout_rate = dropout_rate
self.initializer_factor = initializer_factor
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.scope = None
self.decoder_layers = decoder_layers
def A_ ( self : List[Any] ) -> int:
return TaConfig.from_pretrained('google/umt5-base' )
def A_ ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : str=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Optional[Any]=None , ) -> List[str]:
if attention_mask is None:
lowerCamelCase__ : Optional[Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCamelCase__ : Optional[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCamelCase__ : int = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCAmelCase )
if decoder_head_mask is None:
lowerCamelCase__ : Dict = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase )
if cross_attn_head_mask is None:
lowerCamelCase__ : Dict = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def A_ ( self : str ) -> List[str]:
lowerCamelCase__ : Any = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCamelCase__ : List[str] = input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase__ : Union[str, Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase__ : Dict = self.get_config()
lowerCamelCase__ : Tuple = config.num_attention_heads
lowerCamelCase__ : Any = self.prepare_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
return config, input_dict
def A_ ( self : Tuple ) -> Union[str, Any]:
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def A_ ( self : Optional[int] ) -> List[str]:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def A_ ( self : Union[str, Any] ) -> Dict:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def A_ ( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : Dict , ) -> str:
lowerCamelCase__ : Dict = UMTaModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : Optional[int] = model(
input_ids=UpperCAmelCase , decoder_input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , )
lowerCamelCase__ : Any = model(input_ids=UpperCAmelCase , decoder_input_ids=UpperCAmelCase )
lowerCamelCase__ : Dict = result.last_hidden_state
lowerCamelCase__ : Any = result.past_key_values
lowerCamelCase__ : List[Any] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(UpperCAmelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def A_ ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , ) -> Optional[int]:
lowerCamelCase__ : List[Any] = UMTaModel(config=UpperCAmelCase ).get_decoder().to(UpperCAmelCase ).eval()
# first forward pass
lowerCamelCase__ : Tuple = model(UpperCAmelCase , use_cache=UpperCAmelCase )
lowerCamelCase__ : List[Any] = model(UpperCAmelCase )
lowerCamelCase__ : int = model(UpperCAmelCase , use_cache=UpperCAmelCase )
self.parent.assertTrue(len(UpperCAmelCase ) == len(UpperCAmelCase ) )
self.parent.assertTrue(len(UpperCAmelCase ) == len(UpperCAmelCase ) + 1 )
lowerCamelCase__ , lowerCamelCase__ : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCamelCase__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
lowerCamelCase__ : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase__ : List[str] = model(UpperCAmelCase )['last_hidden_state']
lowerCamelCase__ : str = model(UpperCAmelCase , past_key_values=UpperCAmelCase )['last_hidden_state']
# select random slice
lowerCamelCase__ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase__ : Tuple = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCamelCase__ : List[str] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
def A_ ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , ) -> Tuple:
lowerCamelCase__ : Union[str, Any] = UMTaModel(config=UpperCAmelCase ).to(UpperCAmelCase ).half().eval()
lowerCamelCase__ : Optional[int] = model(**UpperCAmelCase )['last_hidden_state']
self.parent.assertFalse(torch.isnan(UpperCAmelCase ).any().item() )
@require_torch
class lowerCAmelCase ( __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
UpperCAmelCase__ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
UpperCAmelCase__ = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
UpperCAmelCase__ = [0.8, 0.9]
def A_ ( self : Union[str, Any] ) -> List[Any]:
lowerCamelCase__ : Union[str, Any] = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def A_ ( self : Tuple ) -> int:
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
lowerCamelCase__ : Tuple = UMTaModel(config_and_inputs[0] ).to(UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
UpperCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"""{tmpdirname}/t5_test.onnx""" , export_params=UpperCAmelCase , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def A_ ( self : Tuple ) -> Optional[Any]:
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*UpperCAmelCase )
def A_ ( self : List[Any] ) -> str:
lowerCamelCase__ : int = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
lowerCamelCase__ : Any = config_and_inputs[0]
lowerCamelCase__ : Any = UMTaForConditionalGeneration(UpperCAmelCase ).eval()
model.to(UpperCAmelCase )
lowerCamelCase__ : Tuple = {
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=UpperCAmelCase ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase ),
}
for attn_name, (name, mask) in zip(UpperCAmelCase , head_masking.items() ):
lowerCamelCase__ : Union[str, Any] = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
lowerCamelCase__ : Union[str, Any] = torch.ones(
config.num_decoder_layers , config.num_heads , device=UpperCAmelCase )
lowerCamelCase__ : Tuple = model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=UpperCAmelCase , return_dict_in_generate=UpperCAmelCase , **UpperCAmelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
lowerCamelCase__ : Union[str, Any] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def A_ ( self : Optional[Any] ) -> Optional[Any]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def A_ ( self : Any ) -> int:
lowerCamelCase__ : Optional[Any] = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=UpperCAmelCase ).to(UpperCAmelCase )
lowerCamelCase__ : List[str] = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=UpperCAmelCase , legacy=UpperCAmelCase )
lowerCamelCase__ : Dict = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
lowerCamelCase__ : Tuple = tokenizer(UpperCAmelCase , return_tensors='pt' , padding=UpperCAmelCase ).input_ids
# fmt: off
lowerCamelCase__ : Any = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Optional[int] = model.generate(input_ids.to(UpperCAmelCase ) )
lowerCamelCase__ : List[Any] = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
lowerCamelCase__ : Union[str, Any] = tokenizer.batch_decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
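# Minimal generation sketch matching the integration test above; note that in
# the released transformers library the class is spelled UMT5ForConditionalGeneration.
from transformers import AutoTokenizer, UMT5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")
input_ids = tokenizer("A <extra_id_0> walks into a bar.", return_tensors="pt").input_ids
outputs = model.generate(input_ids, max_new_tokens=10)
print(tokenizer.batch_decode(outputs, skip_special_tokens=False))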
'''simple docstring'''
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
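# For context, a hedged sketch of the send_file function the test drives (the
# real implementation lives in file_transfer/send_file.py; the port number,
# buffer size, and loop structure below are assumptions inferred from the
# assertions above, not the verified source):
import socket


def send_file(filename: str = "mytext.txt", testing: bool = False) -> None:
    port = 12_312  # assumed port number
    sock = socket.socket()
    host = socket.gethostname()
    sock.bind((host, port))
    sock.listen(5)
    while True:
        conn, addr = sock.accept()
        conn.recv(1024)  # wait for the client's request
        with open(filename, "rb") as in_file:
            data = in_file.read(1024)
            while data:
                conn.send(data)
                data = in_file.read(1024)
        conn.close()
        if testing:  # let the test finish after one connection
            break
    sock.shutdown(1)
    sock.close()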
def solution(limit: int = 1000000) -> int:
    """simple docstring"""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a_ :
def __init__( self , parent , batch_size=1_3 , image_size=3_0 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.scope = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[Any] = ViTMSNModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Tuple = self.type_sequence_label_size
_lowerCAmelCase : int = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[int] = model(snake_case_ , labels=snake_case_ )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase : int = 1
_lowerCAmelCase : List[str] = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[int] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = config_and_inputs
_lowerCAmelCase : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ (_a , _a , unittest.TestCase ):
__lowerCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__lowerCAmelCase : Optional[int] = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase : Dict = False
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : Any = False
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = ViTMSNModelTester(self )
_lowerCAmelCase : int = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(snake_case_ )
_lowerCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def __UpperCamelCase ( self ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[int] = ViTMSNModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def _UpperCAmelCase ( ) -> Tuple:
_lowerCAmelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a_ (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self ):
torch.manual_seed(2 )
_lowerCAmelCase : Dict = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(snake_case_ )
_lowerCAmelCase : Dict = self.default_image_processor
_lowerCAmelCase : Any = prepare_img()
_lowerCAmelCase : List[str] = image_processor(images=snake_case_ , return_tensors="""pt""" ).to(snake_case_ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Dict = model(**snake_case_ )
# verify the logits
_lowerCAmelCase : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case_ )
_lowerCAmelCase : Tuple = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) )
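# Minimal standalone classification sketch matching the integration test above
# (same checkpoint and fixture image; note the MSN checkpoint is self-supervised,
# so the classification head is randomly initialized and the predicted index is
# only reproducible with the fixed seed used in the test):
import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNForImageClassification

image_processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.shape)  # torch.Size([1, 1000])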
import os


def solution() -> int:
    """Project Euler 18/67: maximum top-to-bottom path sum through the triangle."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    # Bottom-up dynamic programming: each cell accumulates the best path sum
    # through either of its two parents in the row above.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number_a = a[i - 1][j] if j != len(a[i - 1]) else 0
            number_b = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_a, number_b)
    return max(a[-1])
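

# Hedged sketch (added, not in the original script): the same recurrence on an
# inline triangle, so the DP step can be checked without a triangle.txt on disk.
def _demo_max_path_sum() -> int:
    a = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]  # best path is 3 + 7 + 4 + 9 = 23
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number_a = a[i - 1][j] if j != len(a[i - 1]) else 0
            number_b = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_a, number_b)
    return max(a[-1])  # -> 23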
if __name__ == "__main__":
print(solution())
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
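

# Hedged usage sketch (added, not in the original module). Because of the
# relative imports, this file cannot run as a script; drive the tool from
# elsewhere. The pipeline is setup -> encode -> forward -> decode:
#
# >>> from transformers.tools.text_to_speech import TextToSpeechTool
# >>> tool = TextToSpeechTool()
# >>> tool.setup()
# >>> encoded = tool.encode("Hello, world.")          # tokenize + default speaker embedding
# >>> waveform = tool.decode(tool.forward(encoded))   # generate speech, then vocode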
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
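

# Tiny illustration (added, not in the original script): rename_key mutates the
# dict in place, moving the value from the old key to the new one.
# >>> d = {"ln_vision.weight": 1}
# >>> rename_key(d, "ln_vision.weight", "vision_model.post_layernorm.weight")
# >>> d
# {'vision_model.post_layernorm.weight': 1}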
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name, eos_token_id=None):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
import jax.numpy as jnp
from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"
def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    """
    Shift input ids one token to the right, inserting the decoder start token
    and mapping label padding (-100) back to the pad token.
    """
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
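

# Quick illustration (added, not in the original module):
# >>> shift_tokens_right(jnp.array([[5, 6, -100]]), pad_token_id=0, decoder_start_token_id=2)
# Array([[2, 5, 6]], dtype=int32)
# >>> shift_tokens_right(jnp.array([[5, -100, -100]]), pad_token_id=0, decoder_start_token_id=2)
# Array([[2, 5, 0]], dtype=int32)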
class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
'''simple docstring'''
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks at `indent_level`, optionally between `start_prompt` and `end_prompt`."""
    # Let's split the code into lines and move to start_index.
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wraps a key function so that sorting lower-cases and ignores underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
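

# Small illustration (added, not in the original file) of the isort-style rule:
# constants first, then classes, then functions, each group sorted ignoring underscores.
# >>> sort_objects(["my_func", "MyClass", "MY_CONSTANT", "_private_func"])
# ['MY_CONSTANT', 'MyClass', 'my_func', '_private_func']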
def sort_objects_in_import(import_statement: str) -> str:
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; `check_only` determines if we only check or overwrite."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between two (torch or numpy) vectors."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly parallel: fall back to plain linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
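

# Quick numerical check (added, not in the original pipeline): slerp between two
# orthogonal unit vectors at t=0.5 lands on the 45-degree unit vector.
# >>> slerp(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0]))
# array([0.70710678, 0.70710678])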
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
@torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding="longest",
        return_tensors=None,
        truncation=True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
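

# Hedged usage sketch (added, not in the original module): with a RAG checkpoint
# such as "facebook/rag-token-nq", the two sub-tokenizers live in the
# question_encoder_tokenizer/ and generator_tokenizer/ subfolders written by
# save_pretrained above.
# >>> tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
# >>> batch = tokenizer(["who holds the record in 100m freestyle"], return_tensors="pt")
# >>> tokenizer.batch_decode(batch["input_ids"])  # decoding uses the generator tokenizer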
'''simple docstring'''
from __future__ import annotations
def CeilIndex(v, l, r, key):  # noqa: E741
    # Binary search in tail[l..r] for the first position whose value is >= key.
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def LongestIncreasingSubsequenceLength(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] tightens an existing tail to keep tails as small as possible
            tail[CeilIndex(v, -1, length - 1, v[i])] = v[i]

    return length
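

# Worked example (added, not in the original file): for [2, 5, 3, 7, 11, 8, 10, 13, 6]
# one longest strictly increasing subsequence is [2, 3, 7, 8, 10, 13], length 6.
def _demo_lis() -> None:
    assert LongestIncreasingSubsequenceLength([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6
    assert LongestIncreasingSubsequenceLength([]) == 0
    assert LongestIncreasingSubsequenceLength([1, 1, 1]) == 1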
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """Sort the list `a` in place using pigeonhole sort (integers only)."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
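

# Quick check (added, not in the original file): sorting builds holes over the
# value range 2..8 and writes values back in ascending order.
def _demo_pigeonhole_sort() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    assert a == [2, 3, 4, 6, 7, 8, 8]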
def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))
if __name__ == "__main__":
main()
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]

        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]

        self.assertListEqual(
            language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]
        )
        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
        134,
        5452,
        33460,
        33441,
        33463,
        33465,
        33463,
        33449,
        988,
        20,
        33456,
        19,
        33456,
        771,
        39,
        4258,
        889,
        3318,
        33441,
        33463,
        33465,
        33463,
        33449,
        2471,
        2,
        PYTHON_CODE,
    ]
@classmethod
def snake_case ( cls ):
__lowerCAmelCase = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
__lowerCAmelCase = 1
return cls
def snake_case ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_00_03 )
def snake_case ( self ):
__lowerCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __a )
def snake_case ( self ):
self.assertIn(__a , self.tokenizer.all_special_ids )
__lowerCAmelCase = [EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
__lowerCAmelCase = self.tokenizer.decode(__a , skip_special_tokens=__a )
__lowerCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
self.assertEqual(__a , __a )
self.assertNotIn(self.tokenizer.eos_token , __a )
def snake_case ( self ):
__lowerCAmelCase = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
self.assertIsInstance(src_text[0] , __a )
__lowerCAmelCase = 10
__lowerCAmelCase = self.tokenizer(__a , max_length=__a , truncation=__a ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __a )
self.assertEqual(len(__a ) , __a )
def snake_case ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_00_04, 5_00_01] )
def snake_case ( self ):
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__a )
__lowerCAmelCase = PLBartTokenizer.from_pretrained(__a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __a )
@require_torch
def snake_case ( self ):
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors="pt" )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
        self.assertEqual(batch.decoder_input_ids[1][0] , EN_CODE )
        self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
        self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def snake_case ( self ):
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 26) , batch.input_ids.shape )
        self.assertEqual((2, 26) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def snake_case ( self ):
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="pt" )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors="pt" )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def snake_case ( self ):
        inputs = self.tokenizer._build_translation_inputs(
            "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
        self.assertEqual(
            nested_simplify(inputs ) , {
# A, test, EOS, en_XX
"input_ids": [[1_50, 2_42, 2, 5_00_03]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_00_01,
} , )
| 57
|
'''simple docstring'''
def solution(n: int = 100 ) -> int:
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(F'{solution() = }')
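# Added cross-check (not part of the original snippet): a brute-force version of
# the same quantity, handy for validating the closed-form expressions above.
# `_naive_solution` is a hypothetical helper introduced here for illustration.
def _naive_solution(n: int = 100) -> int:
    return sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))


assert _naive_solution(10) == solution(10) == 2640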
| 309
| 0
|
'''simple docstring'''
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def str_eval(s: str ) -> int:
    product = 1
    for digit in s:
        product *= int(digit )
    return product


def solution(n: str = N ) -> int:
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n ) - 13:
        if int(n[cur_index] ) >= int(substr[0] ):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product , str_eval(substr ) )
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""")
| 58
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
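# Illustrative sketch (added): the idea behind the `_LazyModule` pattern above,
# reduced to its core. This is a simplified stand-in, not the real transformers
# implementation; it shows why nothing under `modeling_focalnet` is imported
# until one of the exported names is first accessed.
#
#     import importlib
#     import types
#
#     class LazyModuleSketch(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._attr_to_module = {
#                 attr: mod for mod, attrs in import_structure.items() for attr in attrs
#             }
#
#         def __getattr__(self, attr):  # called only on a miss, i.e. first access
#             submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#             return getattr(submodule, attr)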
| 309
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
__lowerCamelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCamelCase = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
__lowerCamelCase = {
"""unc-nlp/lxmert-base-uncased""": 5_12,
}
__lowerCamelCase = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 59
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
UpperCamelCase_ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""DPTFeatureExtractor"""]
UpperCamelCase_ = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 0
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict ):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict(state_dict , codebook_state_dict ):
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head" )
        key = key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head" )
        key = key.replace("heads.cmd.itm_head.cls" , "itm_head" )
        key = key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler" )
        key = key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale" )
        key = key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head" )
        key = key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head" )
        key = key.replace("mm_text_projection" , "flava.text_to_mm_projection" )
        key = key.replace("mm_image_projection" , "flava.image_to_mm_projection" )
        key = key.replace("image_encoder.module" , "flava.image_model" )
        key = key.replace("text_encoder.module" , "flava.text_model" )
        key = key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token" )
        key = key.replace("mm_encoder.module" , "flava.multimodal_model" )
        key = key.replace("text_projection" , "flava.text_projection" )
        key = key.replace("image_projection" , "flava.image_projection" )
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value
    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path , codebook_path , pytorch_dump_folder_path , config_path=None ):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path )
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config ).eval()
    codebook_state_dict = convert_dalle_checkpoint(codebook_path , None , save_checkpoint=False )
    if os.path.exists(checkpoint_path ):
        state_dict = torch.load(checkpoint_path , map_location="cpu" )
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path , map_location="cpu" )
    hf_state_dict = upgrade_state_dict(state_dict , codebook_state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict ) + count_parameters(codebook_state_dict )
    assert torch.allclose(hf_count , state_dict_count , atol=1E-3 )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
snake_case__ : str = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--codebook_path''', default=None, type=str, help='''Path to flava codebook checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
snake_case__ : Any = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
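# Example invocation (added; the checkpoint file names below are placeholders,
# not artifacts this script ships with):
#
#     python convert_flava_original_pytorch_to_hf.py \
#         --checkpoint_path flava_full.pt \
#         --codebook_path flava_codebook.pt \
#         --pytorch_dump_folder_path ./flava-hf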
| 60
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float] ) -> np.ndarray:
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
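# Added companion sketch (not part of the original snippet): the (sub)gradient
# of ReLU, using the common convention relu'(0) = 0.
def relu_derivative(vector: list[float]) -> np.ndarray:
    return np.where(np.array(vector) > 0, 1.0, 0.0)


print(relu_derivative([-1, 0, 5]))  # --> [0. 0. 1.]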
| 309
| 0
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 61
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor ):
    def __init__(self , *args , **kwargs ):
        warnings.warn(
            """The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DonutImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 309
| 0
|
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Any:
__UpperCamelCase ='hf-internal-testing/tiny-random-t5'
__UpperCamelCase =AutoTokenizer.from_pretrained(A_ )
__UpperCamelCase =AutoModelForSeqaSeqLM.from_pretrained(A_ )
__UpperCamelCase =tokenizer('This is me' , return_tensors='pt' )
__UpperCamelCase =model.to_bettertransformer()
self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
__UpperCamelCase =model.generate(**A_ )
__UpperCamelCase =model.reverse_bettertransformer()
self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A_ )
__UpperCamelCase =AutoModelForSeqaSeqLM.from_pretrained(A_ )
self.assertFalse(
any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
__UpperCamelCase =model_reloaded.generate(**A_ )
self.assertTrue(torch.allclose(A_ , A_ ) )
def _a ( self ) -> str:
__UpperCamelCase ='hf-internal-testing/tiny-random-t5'
__UpperCamelCase =AutoModelForSeqaSeqLM.from_pretrained(A_ )
__UpperCamelCase =model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(A_ ):
model.save_pretrained(A_ )
__UpperCamelCase =model.reverse_bettertransformer()
model.save_pretrained(A_ )
| 62
|
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest ):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self , **kwargs ):
        config = {
            """num_train_timesteps""": 1100,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """noise_sampler_seed""": 0,
        }
        config.update(**kwargs )
        return config
def __UpperCamelCase ( self ):
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def __UpperCamelCase ( self ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=snake_case_ , beta_end=snake_case_ )
def __UpperCamelCase ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=snake_case_ )
def __UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.scheduler_classes[0]
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : Any = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : Tuple = self.dummy_model()
_lowerCAmelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : Optional[Any] = sample.to(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Union[str, Any] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : Union[str, Any] = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = output.prev_sample
_lowerCAmelCase : List[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1E-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.scheduler_classes[0]
_lowerCAmelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
_lowerCAmelCase : Dict = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : int = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : int = sample.to(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : List[str] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : List[Any] = model(snake_case_ , snake_case_ )
_lowerCAmelCase : str = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : int = output.prev_sample
_lowerCAmelCase : str = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Optional[int] = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1E-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1E-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1E-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : str = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case_ )
_lowerCAmelCase : Tuple = self.dummy_model()
_lowerCAmelCase : Optional[int] = self.dummy_sample_deter.to(snake_case_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_lowerCAmelCase : str = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = output.prev_sample
_lowerCAmelCase : List[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1E-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = self.scheduler_classes[0]
_lowerCAmelCase : Optional[int] = self.get_scheduler_config()
_lowerCAmelCase : Tuple = scheduler_class(**snake_case_ , use_karras_sigmas=snake_case_ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case_ )
_lowerCAmelCase : List[Any] = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter.to(snake_case_ ) * scheduler.init_noise_sigma
_lowerCAmelCase : Optional[int] = sample.to(snake_case_ )
for t in scheduler.timesteps:
_lowerCAmelCase : List[str] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : int = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[int] = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : str = output.prev_sample
_lowerCAmelCase : Optional[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
| 309
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_falcon'] = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 63
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"""vocab_file""": """vocab.txt"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
UpperCamelCase_ = {
"""YituTech/conv-bert-base""": 5_12,
"""YituTech/conv-bert-medium-small""": 5_12,
"""YituTech/conv-bert-small""": 5_12,
}
UpperCamelCase_ = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class a_ (_a ):
__lowerCAmelCase : Any = VOCAB_FILES_NAMES
__lowerCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Optional[int] = ConvBertTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_=True , snake_case_=None , **snake_case_ , ):
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
_lowerCAmelCase : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , snake_case_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , snake_case_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , snake_case_ ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(snake_case_ , normalizer_state.pop("""type""" ) )
_lowerCAmelCase : List[str] = do_lower_case
_lowerCAmelCase : str = strip_accents
_lowerCAmelCase : List[Any] = tokenize_chinese_chars
_lowerCAmelCase : List[Any] = normalizer_class(**snake_case_ )
_lowerCAmelCase : str = do_lower_case
def __UpperCamelCase ( self , snake_case_ , snake_case_=None ):
_lowerCAmelCase : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Optional[Any] = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Any = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
| 309
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
A_ = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
A_ = parser.parse_args()
if args.model_type == "bert":
A_ = BertForMaskedLM.from_pretrained(args.model_name)
A_ = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
A_ = model.state_dict()
A_ = {}
for w in ["word_embeddings", "position_embeddings"]:
A_ = state_dict[F'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
A_ = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
A_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
A_ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
A_ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
A_ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
A_ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
A_ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
A_ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
A_ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
A_ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
A_ = state_dict['''cls.predictions.decoder.weight''']
A_ = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
A_ = state_dict[F'''cls.predictions.transform.dense.{w}''']
A_ = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 64
|
'''simple docstring'''
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self ):
        self.img = """"""
        self.original_image = """"""
        self.last_list = []
        self.rem = 0
        self.L = 2_5_6
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self , input_image ):
        self.img = cv2.imread(input_image , 0 )
        self.original_image = copy.deepcopy(self.img )
        x, _, _ = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="""x""" )
        self.k = np.sum(x )
        for i in range(len(x ) ):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last )
            last = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(last )
        self.number_of_rows = int(np.ma.count(self.img ) / self.img[1].size )
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("""output_data/output.jpg""" , self.img )

    def plot_histogram(self ):
        plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )

    def show_image(self ):
        cv2.imshow("""Output-Image""" , self.img )
        cv2.imshow("""Input-Image""" , self.original_image )
        cv2.waitKey(5_0_0_0 )
        cv2.destroyAllWindows()
if __name__ == "__main__":
UpperCamelCase_ = os.path.join(os.path.basename(__file__), """image_data/input.jpg""")
UpperCamelCase_ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
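# Worked numeric example (added for illustration): the same cumulative-histogram
# mapping on a tiny 4-level image, independent of OpenCV. A grey level g maps to
# round((L - 1) * CDF(g)).
#
#     counts = np.array([2.0, 4.0, 1.0, 1.0])    # pixels at levels 0..3
#     cdf = np.cumsum(counts / counts.sum())     # [0.25, 0.75, 0.875, 1.0]
#     mapping = np.round(3 * cdf).astype(int)    # [1, 2, 3, 3]
#
# Dark levels are pushed toward the top of the range, which is exactly what the
# `last_list` lookup table above stores for L = 256.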
| 309
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput ):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig ):
    def __init__(self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=5_1_2 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel ):
    _keys_to_ignore_on_load_unexpected = [r'pooler', r'logit_scale']
    _keys_to_ignore_on_load_missing = [r'position_ids', r'predictions.decoder.bias']
    base_model_prefix = 'roberta'
    config_class = RobertaSeriesConfig

    def __init__(self , config ) -> None:
        super().__init__(config )
        self.base_model = XLMRobertaModel(config )
        self.transformation = nn.Linear(config.hidden_size , config.project_dim )
        self.has_pre_transformation = getattr(config , "has_pre_transformation" , False )
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size , config.project_dim )
            self.pre_LN = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()

    def forward(self , input_ids: Optional[torch.Tensor] = None , attention_mask: Optional[torch.Tensor] = None , token_type_ids: Optional[torch.Tensor] = None , position_ids: Optional[torch.Tensor] = None , head_mask: Optional[torch.Tensor] = None , inputs_embeds: Optional[torch.Tensor] = None , encoder_hidden_states: Optional[torch.Tensor] = None , encoder_attention_mask: Optional[torch.Tensor] = None , output_attentions: Optional[bool] = None , return_dict: Optional[bool] = None , output_hidden_states: Optional[bool] = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_attentions=output_attentions , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=return_dict , )
        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output )
            projection_state = self.transformation_pre(sequence_output )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            projection_state = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
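# Usage sketch (added): how the pieces above fit together. The checkpoint name
# is a placeholder for any XLM-RoBERTa-style checkpoint carrying the extra
# projection weights; it is not shipped with this file.
#
#     config = RobertaSeriesConfig.from_pretrained("<alt-diffusion-text-encoder>")
#     model = RobertaSeriesModelWithTransformation(config)
#     out = model(input_ids=input_ids, attention_mask=attention_mask)
#     out.projection_state  # (batch, seq_len, config.project_dim)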
| 65
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 0
|
"""simple docstring"""
def logical_left_shift(number: int , shift_amount: int ) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("""both inputs must be positive integers""" )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int , shift_amount: int ) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("""both inputs must be positive integers""" )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int , shift_amount: int ) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = """0""" + str(bin(number ) ).strip("""-""" )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            """1""" + """0""" * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
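# Added usage examples: the three shifts side by side. logical_left_shift pads
# zeros on the right; arithmetic_right_shift replicates the sign bit of the
# two's-complement form instead of shifting zeros in.
print(logical_left_shift(17, 2))  # 0b1000100
print(logical_right_shift(1983, 4))  # 0b1111011
print(arithmetic_right_shift(-17, 2))  # 0b111011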
| 66
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
UpperCamelCase_ = 0
UpperCamelCase_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
UpperCamelCase_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
UpperCamelCase_ = tuple[int, int]
class Node:
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_lowerCAmelCase : Optional[int] = pos_x
_lowerCAmelCase : List[str] = pos_y
_lowerCAmelCase : Tuple = (pos_y, pos_x)
_lowerCAmelCase : List[Any] = goal_x
_lowerCAmelCase : int = goal_y
_lowerCAmelCase : Union[str, Any] = g_cost
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : List[Any] = self.calculate_heuristic()
_lowerCAmelCase : Optional[int] = self.g_cost + self.h_cost
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.pos_x - self.goal_x
_lowerCAmelCase : Optional[int] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(snake_case_ ) + abs(snake_case_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , snake_case_ ):
return self.f_cost < other.f_cost
class AStar:
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : Optional[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , snake_case_ )
_lowerCAmelCase : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , snake_case_ )
_lowerCAmelCase : List[str] = [self.start]
_lowerCAmelCase : list[Node] = []
_lowerCAmelCase : List[str] = False
def __UpperCamelCase ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_lowerCAmelCase : Optional[int] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(snake_case_ )
self.closed_nodes.append(snake_case_ )
_lowerCAmelCase : Optional[int] = self.get_successors(snake_case_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(snake_case_ )
else:
# retrieve the best current path
_lowerCAmelCase : Optional[Any] = self.open_nodes.pop(self.open_nodes.index(snake_case_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(snake_case_ )
else:
self.open_nodes.append(snake_case_ )
return [self.start.pos]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Union[str, Any] = []
for action in delta:
_lowerCAmelCase : Union[str, Any] = parent.pos_x + action[1]
_lowerCAmelCase : Dict = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(snake_case_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
snake_case_ , snake_case_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , snake_case_ , ) )
return successors
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[Any] = node
_lowerCAmelCase : Optional[Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_lowerCAmelCase : Optional[int] = current_node.parent
path.reverse()
return path
class BidirectionalAStar:
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[str] = AStar(snake_case_ , snake_case_ )
_lowerCAmelCase : int = AStar(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[int] = False
def __UpperCamelCase ( self ):
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
_lowerCAmelCase : Tuple = self.fwd_astar.open_nodes.pop(0 )
_lowerCAmelCase : Optional[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
snake_case_ , snake_case_ )
self.fwd_astar.closed_nodes.append(snake_case_ )
self.bwd_astar.closed_nodes.append(snake_case_ )
_lowerCAmelCase : List[str] = current_bwd_node
_lowerCAmelCase : Dict = current_fwd_node
_lowerCAmelCase : Any = {
self.fwd_astar: self.fwd_astar.get_successors(snake_case_ ),
self.bwd_astar: self.bwd_astar.get_successors(snake_case_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(snake_case_ )
else:
# retrieve the best current path
_lowerCAmelCase : List[Any] = astar.open_nodes.pop(
astar.open_nodes.index(snake_case_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(snake_case_ )
else:
astar.open_nodes.append(snake_case_ )
return [self.fwd_astar.start.pos]
def __UpperCamelCase ( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : int = self.fwd_astar.retrace_path(snake_case_ )
_lowerCAmelCase : Optional[Any] = self.bwd_astar.retrace_path(snake_case_ )
bwd_path.pop()
bwd_path.reverse()
_lowerCAmelCase : Dict = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
UpperCamelCase_ = (0, 0)
UpperCamelCase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
UpperCamelCase_ = time.time()
UpperCamelCase_ = AStar(init, goal)
UpperCamelCase_ = a_star.search()
UpperCamelCase_ = time.time() - start_time
print(F'AStar execution time = {end_time:f} seconds')
UpperCamelCase_ = time.time()
UpperCamelCase_ = BidirectionalAStar(init, goal)
UpperCamelCase_ = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
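# Worked heuristic example (added): from the start (0, 0) to the goal (6, 6)
# above, the Manhattan estimate is |6 - 0| + |6 - 0| = 12 while the Euclidean
# one is sqrt(6**2 + 6**2) ≈ 8.49. On this 4-connected grid both are
# admissible, and Manhattan is the tighter (larger) lower bound.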
| 309
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
def __init__( self : List[Any] , a : Any ):
"""simple docstring"""
__lowerCamelCase = data
__lowerCamelCase = None
class CircularLinkedList:
def __init__( self : Any ):
"""simple docstring"""
__lowerCamelCase = None
__lowerCamelCase = None
def __iter__( self : Any ):
"""simple docstring"""
__lowerCamelCase = self.head
while self.head:
yield node.data
__lowerCamelCase = node.next
if node == self.head:
break
def __len__( self : Any ):
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self : Union[str, Any] ):
"""simple docstring"""
return "->".join(str(a ) for item in iter(self ) )
def SCREAMING_SNAKE_CASE__ ( self : str , a : Any ):
"""simple docstring"""
self.insert_nth(len(self ) , a )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : Any ):
"""simple docstring"""
self.insert_nth(0 , a )
def SCREAMING_SNAKE_CASE__ ( self : int , a : int , a : Any ):
"""simple docstring"""
if index < 0 or index > len(self ):
raise IndexError('''list index out of range.''' )
__lowerCamelCase = Node(a )
if self.head is None:
__lowerCamelCase = new_node # first node points itself
__lowerCamelCase = __lowerCamelCase = new_node
elif index == 0: # insert at head
__lowerCamelCase = self.head
__lowerCamelCase = __lowerCamelCase = new_node
else:
__lowerCamelCase = self.head
for _ in range(index - 1 ):
__lowerCamelCase = temp.next
__lowerCamelCase = temp.next
__lowerCamelCase = new_node
if index == len(self ) - 1: # insert at tail
__lowerCamelCase = new_node
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
return self.delete_nth(0 )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : int = 0 ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise IndexError('''list index out of range.''' )
__lowerCamelCase = self.head
if self.head == self.tail: # just one node
__lowerCamelCase = __lowerCamelCase = None
elif index == 0: # delete head node
__lowerCamelCase = self.tail.next.next
__lowerCamelCase = self.head.next
else:
__lowerCamelCase = self.head
for _ in range(index - 1 ):
__lowerCamelCase = temp.next
__lowerCamelCase = temp.next
__lowerCamelCase = temp.next.next
if index == len(self ) - 1: # delete at tail
__lowerCamelCase = temp
return delete_node.data
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
return len(self ) == 0
def __lowerCAmelCase ( ) -> None:
__lowerCamelCase = CircularLinkedList()
assert len(UpperCamelCase__ ) == 0
assert circular_linked_list.is_empty() is True
assert str(UpperCamelCase__ ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(UpperCamelCase__ ) == i
circular_linked_list.insert_nth(UpperCamelCase__ , i + 1 )
assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 67
|
'''simple docstring'''
def find_minimum_change(denominations: list[int] , value: str ) -> list[int]:
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append the "answers" array
    return answer
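# Quick sanity check (added for illustration): with the canonical Indian
# denominations used by the driver below, 987 decomposes greedily into
# 500 + 100 + 100 + 100 + 100 + 50 + 20 + 10 + 5 + 2. Greedy is only
# guaranteed optimal for canonical coin systems such as this one.
assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987") == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2,
]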
# Driver Code
if __name__ == "__main__":
UpperCamelCase_ = []
UpperCamelCase_ = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
UpperCamelCase_ = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'Denomination {i}: ').strip()))
UpperCamelCase_ = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCamelCase_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
UpperCamelCase_ = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'Following is minimal change for {value}: ')
UpperCamelCase_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 309
| 0
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self ) -> None:
        '''simple docstring'''
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = "<s>"
A__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase )
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(vocab_keys ) , 1000 )
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
def UpperCamelCase ( self , lowercase=15 ) -> Any:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A__ = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
# Simple input
A__ = "This is a simple input"
A__ = ["This is a simple input 1", "This is a simple input 2"]
A__ = ("This is a simple input", "This is a pair")
A__ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowercase , tokenizer_r.encode , lowercase , max_length=lowercase , padding="max_length" )
# Simple input
self.assertRaises(lowercase , tokenizer_r.encode_plus , lowercase , max_length=lowercase , padding="max_length" )
# Simple input
self.assertRaises(
lowercase , tokenizer_r.batch_encode_plus , lowercase , max_length=lowercase , padding="max_length" , )
# Pair input
self.assertRaises(lowercase , tokenizer_r.encode , lowercase , max_length=lowercase , padding="max_length" )
# Pair input
self.assertRaises(lowercase , tokenizer_r.encode_plus , lowercase , max_length=lowercase , padding="max_length" )
# Pair input
self.assertRaises(
lowercase , tokenizer_r.batch_encode_plus , lowercase , max_length=lowercase , padding="max_length" , )
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
    def big_tokenizer(self ) -> ReformerTokenizer:
'''simple docstring'''
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
A__ = "Hello World!"
A__ = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(lowercase , self.big_tokenizer.encode(lowercase ) )
@slow
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
        symbols = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training, so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
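# Minimal usage sketch (not part of the test suite above) of the pretrained
# tokenizer exercised by `big_tokenizer`; the checkpoint name comes from the
# tests, everything else is illustrative and requires network access:
from transformers import ReformerTokenizer

reformer_tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
print(reformer_tokenizer.encode("Hello World!"))  # per the test above: [126, 32, 262, 152, 38, 72, 287]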
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
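# The `_LazyModule` indirection above defers importing framework-specific code
# until an attribute is actually accessed. A minimal, self-contained sketch of
# the same idea using PEP 562 module-level __getattr__; the attribute/module
# names below are illustrative, not part of transformers:
import importlib

_LAZY_ATTRS = {"sqrt": "math", "dumps": "json"}  # attribute name -> providing module

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])  # imported only on first access
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")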
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger('''transformers.models.speecht5''')
MAPPING_SPEECH_ENCODER_PRENET = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
MAPPING_TEXT_ENCODER_PRENET = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
MAPPING_SPEECH_DECODER_PRENET = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
MAPPING_TEXT_DECODER_PRENET = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
MAPPING_TEXT_DECODER_POSTNET = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
MAPPING_ENCODER = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
MAPPING_DECODER = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
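# Illustration (not part of the conversion logic) of how the ignore patterns are
# matched: a trailing ".*" acts as a prefix wildcard, and a ".*." in the middle
# matches a prefix/suffix pair around an arbitrary layer index. For example:
#
#   should_ignore("text_encoder_prenet.encoder_prenet.0", ["text_encoder_prenet.*"])     -> True
#   should_ignore("encoder.layers.3.norm_k.weight", ["encoder.layers.*.norm_k.weight"])  -> True
#   should_ignore("decoder.layers.0.fc1.weight", IGNORE_KEYS_S2T)                        -> False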
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
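# For orientation (illustrative comment, not executable logic): fairseq names
# feature-extractor weights like "conv_layers.<layer_id>.<type_id>.weight",
# where type_id 0 is the convolution itself and type_id 2 is its layer/group
# norm. E.g. "conv_layers.0.0.bias" parses above as layer_id=0, type_id=0.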
@torch.no_grad()
def convert_speechta_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None,
):
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
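# Example invocation of the converter defined above; the script filename and all
# paths are illustrative placeholders, not real files:
#
#   python convert_speecht5_checkpoint.py \
#       --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_hf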
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
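# A small usage sketch of the processor API exercised by the tests above; the
# checkpoint name is the one used in setUp, while the audio array is
# illustrative dummy data (requires network access to download the checkpoint):
import numpy as np
from transformers import ClapProcessor

clap_processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
dummy_audio = np.zeros(48_000, dtype=np.float32)  # one second of silence at 48 kHz
audio_inputs = clap_processor(audios=dummy_audio, return_tensors="np")
text_inputs = clap_processor(text="the sound of a cat", return_tensors="np")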
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNetaDConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyVaaImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/m2m100_418M""": 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token="<s>", eos_token="</s>",
        sep_token="</s>", pad_token="<pad>", unk_token="<unk>", language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None, num_madeup_words=8, **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or []
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token,
            sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
    def prepare_seq2seq_batch(
        self, src_texts: List[str], src_lang: str = "en", tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro", **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
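# A short usage sketch of the tokenizer defined above for translation. The model
# class and generate() call follow the standard transformers API; the sentence
# is illustrative, and downloading the checkpoint requires network access:
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

m2m_tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
m2m_model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

encoded = m2m_tokenizer("Life is like a box of chocolates.", return_tensors="pt")
# force the decoder to start with the target-language token (see get_lang_id above)
generated = m2m_model.generate(**encoded, forced_bos_token_id=m2m_tokenizer.get_lang_id("fr"))
print(m2m_tokenizer.batch_decode(generated, skip_special_tokens=True))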
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
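# Usage (illustrative): the helper maps an activation name from a config string
# to the corresponding torch module; "swish" and "silu" are aliases for the same op.
activation = get_activation("swish")
assert isinstance(activation, nn.SiLU)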
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """
    Calculate the numeric solution at each step to the ODE f(x, y) using the
    (forward) explicit Euler method: y[k + 1] = y[k] + step_size * f(x[k], y[k]).
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
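# Worked example (illustrative): integrate dy/dx = y with y(0) = 1 on [0, 3].
# The exact solution is e^x; with step size 0.01 the Euler estimate of y(3) is
# (1.01)**300 ~= 19.79, an underestimate of e**3 ~= 20.09, as expected for a
# first-order method applied to a convex solution.
y_values = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 3.0)
print(y_values[-1])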
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
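# Quick sketch of how the dynamic axes above are consumed. Assumption: OnnxConfig
# is constructed from a model config plus a task string, as in transformers.onnx:
config = RobertaConfig()
onnx_config = RobertaOnnxConfig(config, task="multiple-choice")
print(onnx_config.inputs)  # input_ids/attention_mask with batch, choice, sequence axes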
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
a ="""
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
a ="""\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""
a ="""
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=[\"About 95 species are currently accepted .\"]
>>> predictions=[\"About 95 you now get in .\"]
>>> references=[[\"About 95 species are currently known .\"]]
>>> wiki_split = datasets.load_metric(\"wiki_split\")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []

    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space to split the
    # sentence. Even though the Wiki-Auto and TURK datasets do not require
    # normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="exp" , lowerCamelCase__=None , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=False , ) -> int:
__lowerCamelCase : Tuple = len(references[0] )
if any(len(lowerCamelCase__ ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
__lowerCamelCase : Optional[int] = [[refs[i] for refs in references] for i in range(lowerCamelCase__ )]
__lowerCamelCase : Dict = sacrebleu.corpus_bleu(
lowerCamelCase__ , lowerCamelCase__ , smooth_method=lowerCamelCase__ , smooth_value=lowerCamelCase__ , force=lowerCamelCase__ , lowercase=lowerCamelCase__ , use_effective_order=lowerCamelCase__ , )
return output.score
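# Note on the reshaping above (illustrative): sacrebleu expects references
# grouped per reference "stream", i.e. transposed from one list per prediction.
# A quick sanity check, assuming two predictions with two references each:
#
#   references = [["p1-ref1", "p1-ref2"], ["p2-ref1", "p2-ref2"]]
#   transposed = [[refs[i] for refs in references] for i in range(2)]
#   assert transposed == [["p1-ref1", "p2-ref1"], ["p1-ref2", "p2-ref2"]]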
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
def lowerCAmelCase ( self : str):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence'),
'references': datasets.Sequence(datasets.Value('string' ,id='sequence') ,id='references'),
}) ,codebase_urls=[
'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py',
'https://github.com/cocoxu/simplification/blob/master/SARI.py',
'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py',
'https://github.com/mjpost/sacreBLEU',
] ,reference_urls=[
'https://www.aclweb.org/anthology/Q16-1029.pdf',
'https://github.com/mjpost/sacreBLEU',
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] ,)
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : List[Any]):
__lowerCamelCase : List[str] = {}
result.update({'sari': compute_sari(sources=SCREAMING_SNAKE_CASE__ ,predictions=SCREAMING_SNAKE_CASE__ ,references=SCREAMING_SNAKE_CASE__)})
result.update({'sacrebleu': compute_sacrebleu(predictions=SCREAMING_SNAKE_CASE__ ,references=SCREAMING_SNAKE_CASE__)})
result.update({'exact': compute_em(predictions=SCREAMING_SNAKE_CASE__ ,references=SCREAMING_SNAKE_CASE__)})
return result
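# End-to-end sketch (illustrative; the metric name below is an assumption):
#
#   import datasets
#   metric = datasets.load_metric("wiki_split")
#   results = metric.compute(sources=[...], predictions=[...], references=[[...]])
#   # -> {"sari": ..., "sacrebleu": ..., "exact": ...}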
| 73
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a_ :
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=3_0 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1_0 , snake_case_=0.02 , snake_case_=None , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : Tuple = image_size
_lowerCAmelCase : int = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : str = is_training
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : str = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : Any = type_sequence_label_size
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : Optional[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : List[Any] = (image_size // patch_size) ** 2
_lowerCAmelCase : Dict = num_patches + 1
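    # Sequence-length bookkeeping (illustrative): with the defaults above,
    # image_size=30 and patch_size=2 give (30 // 2) ** 2 = 225 patches, so
    # seq_length is 226 once the [CLS] token is counted.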
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[Any] = ViTMSNModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Tuple = self.type_sequence_label_size
_lowerCAmelCase : int = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[int] = model(snake_case_ , labels=snake_case_ )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase : int = 1
_lowerCAmelCase : List[str] = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[int] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = config_and_inputs
_lowerCAmelCase : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ (_a , _a , unittest.TestCase ):
__lowerCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__lowerCAmelCase : Optional[int] = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase : Dict = False
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : Any = False
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = ViTMSNModelTester(self )
_lowerCAmelCase : int = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(snake_case_ )
_lowerCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def __UpperCamelCase ( self ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[int] = ViTMSNModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def _UpperCAmelCase ( ) -> Tuple:
_lowerCAmelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a_ (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self ):
torch.manual_seed(2 )
_lowerCAmelCase : Dict = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(snake_case_ )
_lowerCAmelCase : Dict = self.default_image_processor
_lowerCAmelCase : Any = prepare_img()
_lowerCAmelCase : List[str] = image_processor(images=snake_case_ , return_tensors="""pt""" ).to(snake_case_ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Dict = model(**snake_case_ )
# verify the logits
_lowerCAmelCase : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case_ )
_lowerCAmelCase : Tuple = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) )
| 309
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Tuple = GPTaTokenizer
_lowerCamelCase: Tuple = GPTaTokenizerFast
_lowerCamelCase: str = True
_lowerCamelCase: Any = {'''add_prefix_space''': True}
_lowerCamelCase: List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
A = dict(zip(A_ ,range(len(A_ ) ) ) )
A = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
A = {'unk_token': '<unk>'}
A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(A_ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(A_ ) )
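        # How the toy BPE above plays out (illustrative): each merges-file line
        # after the "#version" header is one merge rule, applied in order. For
        # " lower" ("\u0120lower"): \u0120+l -> \u0120l, \u0120l+o -> \u0120lo,
        # \u0120lo+w -> \u0120low, and e+r -> er, giving the tokens
        # ["\u0120low", "er"] checked in the tests below.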
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,**A_ : Tuple ) -> Any:
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : str ,**A_ : List[str] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[Any] ) -> Union[str, Any]:
A = 'lower newer'
A = 'lower newer'
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = GPTaTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
A = 'lower newer'
A = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
A = tokenizer.tokenize(A_ ,add_prefix_space=A_ )
self.assertListEqual(A_ ,A_ )
A = tokens + [tokenizer.unk_token]
A = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
if not self.test_rust_tokenizer:
return
A = self.get_tokenizer()
A = self.get_rust_tokenizer(add_prefix_space=A_ )
A = 'lower newer'
# Testing tokenization
A = tokenizer.tokenize(A_ ,add_prefix_space=A_ )
A = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ ,A_ )
# Testing conversion to ids without special tokens
A = tokenizer.encode(A_ ,add_special_tokens=A_ ,add_prefix_space=A_ )
A = rust_tokenizer.encode(A_ ,add_special_tokens=A_ )
self.assertListEqual(A_ ,A_ )
# Testing conversion to ids with special tokens
A = self.get_rust_tokenizer(add_prefix_space=A_ )
A = tokenizer.encode(A_ ,add_prefix_space=A_ )
A = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ ,A_ )
# Testing the unknown token
A = tokens + [rust_tokenizer.unk_token]
A = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A_ ) ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,*A_ : Any ,**A_ : Dict ) -> Any:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any]=15 ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A = self.rust_tokenizer_class.from_pretrained(A_ ,**A_ )
# Simple input
A = 'This is a simple input'
A = ['This is a simple input 1', 'This is a simple input 2']
A = ('This is a simple input', 'This is a pair')
A = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(A_ ,tokenizer_r.encode ,A_ ,max_length=A_ ,padding='max_length' )
# Simple input
self.assertRaises(A_ ,tokenizer_r.encode_plus ,A_ ,max_length=A_ ,padding='max_length' )
# Simple input
self.assertRaises(
A_ ,tokenizer_r.batch_encode_plus ,A_ ,max_length=A_ ,padding='max_length' ,)
# Pair input
self.assertRaises(A_ ,tokenizer_r.encode ,A_ ,max_length=A_ ,padding='max_length' )
# Pair input
self.assertRaises(A_ ,tokenizer_r.encode_plus ,A_ ,max_length=A_ ,padding='max_length' )
# Pair input
self.assertRaises(
A_ ,tokenizer_r.batch_encode_plus ,A_ ,max_length=A_ ,padding='max_length' ,)
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A = GPTaTokenizer.from_pretrained(self.tmpdirname ,pad_token='<pad>' )
# Simple input
A = 'This is a simple input'
A = ['This is a simple input looooooooong', 'This is a simple input']
A = ('This is a simple input', 'This is a pair')
A = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
A = tokenizer.pad_token_id
A = tokenizer(A_ ,padding='max_length' ,max_length=30 ,return_tensors='np' )
A = tokenizer(A_ ,padding=A_ ,truncate=A_ ,return_tensors='np' )
A = tokenizer(*A_ ,padding='max_length' ,max_length=60 ,return_tensors='np' )
A = tokenizer(A_ ,padding=A_ ,truncate=A_ ,return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] ,30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] ,33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] ,60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] ,52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
A = '$$$'
A = GPTaTokenizer.from_pretrained(self.tmpdirname ,bos_token=A_ ,add_bos_token=A_ )
A = 'This is a simple input'
A = ['This is a simple input 1', 'This is a simple input 2']
A = tokenizer.bos_token_id
A = tokenizer(A_ )
A = tokenizer(A_ )
self.assertEqual(out_s.input_ids[0] ,A_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
A = tokenizer.decode(out_s.input_ids )
A = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] ,A_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
pass
def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
# TODO: change to self.get_tokenizers() when the fast version is implemented
A = [self.get_tokenizer(do_lower_case=A_ ,add_bos_token=A_ )]
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
A = 'Encode this.'
A = 'This one too please.'
A = tokenizer.encode(A_ ,add_special_tokens=A_ )
encoded_sequence += tokenizer.encode(A_ ,add_special_tokens=A_ )
A = tokenizer.encode_plus(
A_ ,A_ ,add_special_tokens=A_ ,return_special_tokens_mask=A_ ,)
A = encoded_sequence_dict['input_ids']
A = encoded_sequence_dict['special_tokens_mask']
self.assertEqual(len(A_ ) ,len(A_ ) )
A = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(A_ )
]
A = [x for x in filtered_sequence if x is not None]
self.assertEqual(A_ ,A_ )
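                # What the filtering above verifies (illustrative): with
                # return_special_tokens_mask=True the mask is 1 exactly at the
                # special-token positions, so dropping masked ids recovers the
                # encoding produced with add_special_tokens=False.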
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
A = AutoTokenizer.from_pretrained('facebook/opt-350m' ,from_slow=A_ )
A = 'A photo of a cat'
A = tokenizer.encode(
A_ ,)
self.assertEqual(A_ ,[2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('test_opt' )
A = AutoTokenizer.from_pretrained('./test_opt' )
A = tokenizer.encode(
A_ ,)
self.assertEqual(A_ ,[2, 250, 1345, 9, 10, 4758] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
A = AutoTokenizer.from_pretrained('facebook/opt-350m' ,use_slow=A_ )
A = 'A photo of a cat'
A = tokenizer.encode(
A_ ,)
# Same as above
self.assertEqual(A_ ,[2, 250, 1345, 9, 10, 4758] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
A = AutoTokenizer.from_pretrained('facebook/opt-350m' ,from_slow=A_ )
A = 'bos'
A = tokenizer.get_vocab()['bos']
A = 'A photo of a cat'
A = tokenizer.encode(
A_ ,)
# We changed the bos token
self.assertEqual(A_ ,[3_1957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('./tok' )
A = AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
A = tokenizer.encode(
A_ ,)
self.assertEqual(A_ ,[3_1957, 250, 1345, 9, 10, 4758] )
| 74
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class a_ (_a ):
__lowerCAmelCase : List[Any] = """microsoft/speecht5_tts"""
__lowerCAmelCase : List[Any] = (
"""This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
"""text to read (in English) and returns a waveform object containing the sound."""
)
__lowerCAmelCase : List[str] = """text_reader"""
__lowerCAmelCase : Optional[Any] = SpeechTaProcessor
__lowerCAmelCase : str = SpeechTaForTextToSpeech
__lowerCAmelCase : int = SpeechTaHifiGan
__lowerCAmelCase : int = ["""text"""]
__lowerCAmelCase : int = ["""audio"""]
def __UpperCamelCase ( self ):
if self.post_processor is None:
_lowerCAmelCase : int = """microsoft/speecht5_hifigan"""
super().setup()
def __UpperCamelCase ( self , snake_case_ , snake_case_=None ):
_lowerCAmelCase : Tuple = self.pre_processor(text=snake_case_ , return_tensors="""pt""" , truncation=snake_case_ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
_lowerCAmelCase : List[str] = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
_lowerCAmelCase : Any = torch.tensor(embeddings_dataset[7_3_0_5]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __UpperCamelCase ( self , snake_case_ ):
with torch.no_grad():
return self.model.generate_speech(**snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
with torch.no_grad():
return self.post_processor(snake_case_ ).cpu().detach()
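# Usage sketch (illustrative; the public class name and call signature are
# assumptions based on the PipelineTool API):
#
#   tool = TextToSpeechTool()
#   waveform = tool("Hello world")  # encode -> generate_speech -> HiFi-GAN vocoder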
| 309
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __UpperCamelCase ( unittest.TestCase ):
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ =UNetaDModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''), up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''), )
return model
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.dummy_uncond_unet
lowerCamelCase_ =ScoreSdeVeScheduler()
lowerCamelCase_ =ScoreSdeVePipeline(unet=lowerCAmelCase, scheduler=lowerCAmelCase )
sde_ve.to(lowerCAmelCase )
sde_ve.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =torch.manual_seed(0 )
lowerCamelCase_ =sde_ve(num_inference_steps=2, output_type='''numpy''', generator=lowerCAmelCase ).images
lowerCamelCase_ =torch.manual_seed(0 )
lowerCamelCase_ =sde_ve(num_inference_steps=2, output_type='''numpy''', generator=lowerCAmelCase, return_dict=lowerCAmelCase )[
0
]
lowerCamelCase_ =image[0, -3:, -3:, -1]
lowerCamelCase_ =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase_ =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
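        # Note (illustrative): both pipeline calls above reseed the generator
        # with 0, so the tuple output (return_dict=False) must match the
        # dataclass output element-wise; the 1e-2 tolerance absorbs
        # kernel-level nondeterminism.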
@slow
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''google/ncsnpp-church-256'''
lowerCamelCase_ =UNetaDModel.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =ScoreSdeVeScheduler.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =ScoreSdeVePipeline(unet=lowerCAmelCase, scheduler=lowerCAmelCase )
sde_ve.to(lowerCAmelCase )
sde_ve.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =torch.manual_seed(0 )
lowerCamelCase_ =sde_ve(num_inference_steps=10, output_type='''numpy''', generator=lowerCAmelCase ).images
lowerCamelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCamelCase_ =np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 75
|
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _UpperCAmelCase ( ) -> Tuple:
_lowerCAmelCase : List[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
_lowerCAmelCase : int = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert("""RGB""" )
return image
def _UpperCAmelCase ( _lowerCamelCase : Any ) -> Dict:
_lowerCAmelCase : str = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def _UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] ) -> Optional[Any]:
_lowerCAmelCase : str = dct.pop(_lowerCamelCase )
_lowerCAmelCase : str = val
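# Example (illustrative) of the in-place move performed by rename_key:
#
#   sd = {"ln_vision.weight": torch.ones(3)}
#   rename_key(sd, "ln_vision.weight", "vision_model.post_layernorm.weight")
#   assert "vision_model.post_layernorm.weight" in sd and "ln_vision.weight" not in sd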
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple ) -> Tuple:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_lowerCAmelCase : Tuple = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' )
_lowerCAmelCase : Optional[Any] = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
_lowerCAmelCase : int = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase , requires_grad=_lowerCamelCase ), v_bias) )
_lowerCAmelCase : str = qkv_bias
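# Why zeros go in the middle (illustrative): the original EVA ViT attention
# uses a fused qkv projection whose key bias is fixed at zero, so the HF qkv
# bias is rebuilt as [q_bias ; 0 ; v_bias]. A shape check, assuming hidden
# size 1408 (an assumption for ViT-g):
#
#   q_bias, v_bias = torch.zeros(1408), torch.ones(1408)
#   qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias), v_bias))
#   assert qkv_bias.shape == (3 * 1408,)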
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] ) -> List[Any]:
_lowerCAmelCase : str = 3_64 if """coco""" in model_name else 2_24
_lowerCAmelCase : str = BlipaVisionConfig(image_size=_lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
_lowerCAmelCase : int = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=_lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
_lowerCAmelCase : Union[str, Any] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=_lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
_lowerCAmelCase : Optional[int] = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_lowerCAmelCase : str = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
_lowerCAmelCase : Dict = BlipaConfig(vision_config=_lowerCamelCase , text_config=_lowerCamelCase )
return config, image_size
@torch.no_grad()
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : List[Any]=None , _lowerCamelCase : int=False ) -> List[str]:
_lowerCAmelCase : int = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
_lowerCAmelCase : List[Any] = tokenizer("""\n""" , add_special_tokens=_lowerCamelCase ).input_ids[0]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = BlipaForConditionalGeneration(_lowerCamelCase ).eval()
_lowerCAmelCase : Union[str, Any] = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
_lowerCAmelCase , _lowerCAmelCase : List[str] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_lowerCAmelCase : Dict = """cuda""" if torch.cuda.is_available() else """cpu"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = load_model_and_preprocess(
name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
_lowerCAmelCase : List[Any] = original_model.state_dict()
_lowerCAmelCase : Optional[int] = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_lowerCAmelCase : Tuple = state_dict.pop(_lowerCamelCase )
if key.startswith("""Qformer.bert""" ):
_lowerCAmelCase : List[Any] = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
_lowerCAmelCase : Optional[int] = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
_lowerCAmelCase : Dict = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
_lowerCAmelCase : Tuple = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
_lowerCAmelCase : List[Any] = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
_lowerCAmelCase : int = key.replace("""t5""" , """language""" )
_lowerCAmelCase : Tuple = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert len(_lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_lowerCAmelCase : Union[str, Any] = load_demo_image()
_lowerCAmelCase : Optional[int] = vis_processors["""eval"""](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
_lowerCAmelCase : List[str] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
# create processor
_lowerCAmelCase : Optional[int] = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
_lowerCAmelCase : Tuple = BlipaProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
_lowerCAmelCase : Any = processor(images=_lowerCamelCase , return_tensors="""pt""" ).pixel_values.to(_lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
_lowerCAmelCase : Optional[Any] = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
_lowerCAmelCase : Optional[Any] = hf_model(_lowerCamelCase , _lowerCamelCase ).logits
else:
_lowerCAmelCase : List[Any] = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
_lowerCAmelCase : Tuple = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
_lowerCAmelCase : Dict = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_lowerCAmelCase : Any = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_lowerCAmelCase : List[Any] = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase )
else:
# cast to same type
_lowerCAmelCase : Union[str, Any] = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_lowerCAmelCase : Optional[int] = """"""
_lowerCAmelCase : Union[str, Any] = tokenizer(_lowerCamelCase , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
_lowerCAmelCase : List[Any] = original_model.generate({"""image""": original_pixel_values} )
_lowerCAmelCase : Dict = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , _lowerCamelCase )
_lowerCAmelCase : int = input_ids.shape[1]
_lowerCAmelCase : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
_lowerCAmelCase : List[str] = [text.strip() for text in output_text]
print("""HF generation:""" , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'nielsr/{model_name}' )
hf_model.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
UpperCamelCase_ = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
UpperCamelCase_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 309
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
a_ = {
'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoForCausalLM',
'GPTNeoForQuestionAnswering',
'GPTNeoForSequenceClassification',
'GPTNeoForTokenClassification',
'GPTNeoModel',
'GPTNeoPreTrainedModel',
'load_tf_weights_in_gpt_neo',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'FlaxGPTNeoForCausalLM',
'FlaxGPTNeoModel',
'FlaxGPTNeoPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
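# How the lazy module works (illustrative): at import time only
# `_import_structure` is registered; the first attribute access, e.g.
# `from transformers.models.gpt_neo import GPTNeoModel`, makes _LazyModule
# import `modeling_gpt_neo` on demand, so the optional torch/flax backends are
# never loaded unless actually used.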
| 76
|
'''simple docstring'''
import argparse
import os
import re
UpperCamelCase_ = """src/diffusers"""
# Pattern that looks at the indentation in a line.
UpperCamelCase_ = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCamelCase_ = re.compile(r"""\[([^\]]+)\]""")
def _UpperCAmelCase ( _lowerCamelCase : List[Any] ) -> str:
_lowerCAmelCase : Dict = _re_indent.search(_lowerCamelCase )
return "" if search is None else search.groups()[0]
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str]="" , _lowerCamelCase : str=None , _lowerCamelCase : List[Any]=None ) -> str:
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Tuple = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(_lowerCamelCase ):
index += 1
_lowerCAmelCase : List[Any] = ["""\n""".join(lines[:index] )]
else:
_lowerCAmelCase : List[str] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase : Union[str, Any] = [lines[index]]
index += 1
while index < len(_lowerCamelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_lowerCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(_lowerCamelCase ) )
if index < len(_lowerCamelCase ) - 1:
_lowerCAmelCase : Union[str, Any] = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : Dict = []
else:
blocks.append("""\n""".join(_lowerCamelCase ) )
_lowerCAmelCase : Tuple = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_lowerCamelCase ) > 0:
blocks.append("""\n""".join(_lowerCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCamelCase ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] ) -> Any:
def _inner(_lowerCamelCase : Any ):
return key(_lowerCamelCase ).lower().replace("""_""" , """""" )
return _inner
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple=None ) -> Union[str, Any]:
# If no key is provided, we use a noop.
    def noop(_lowerCamelCase : List[Any] ):
        return _lowerCamelCase
if key is None:
_lowerCAmelCase : Union[str, Any] = noop
# Constants are all uppercase, they go first.
_lowerCAmelCase : Any = [obj for obj in objects if key(_lowerCamelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowerCAmelCase : Union[str, Any] = [obj for obj in objects if key(_lowerCamelCase )[0].isupper() and not key(_lowerCamelCase ).isupper()]
# Functions begin with a lowercase, they go last.
_lowerCAmelCase : Optional[Any] = [obj for obj in objects if not key(_lowerCamelCase )[0].isupper()]
_lowerCAmelCase : List[str] = ignore_underscore(_lowerCamelCase )
return sorted(_lowerCamelCase , key=_lowerCamelCase ) + sorted(_lowerCamelCase , key=_lowerCamelCase ) + sorted(_lowerCamelCase , key=_lowerCamelCase )
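# Example (illustrative) of the three-bucket ordering implemented above:
#
#   sort_objects(["load_tf_weights", "GPTNeoModel", "GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP"])
#   # -> ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoModel", "load_tf_weights"]
#
# Constants (ALL_CAPS) come first, then CamelCase classes, then lowercase
# functions, each bucket sorted case-insensitively with underscores ignored.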
def _UpperCAmelCase ( _lowerCamelCase : str ) -> str:
# This inner function sort imports between [ ].
def _replace(_lowerCamelCase : Union[str, Any] ):
_lowerCAmelCase : Optional[Any] = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
_lowerCAmelCase : List[str] = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : int = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(_lowerCamelCase )] ) + "]"
_lowerCAmelCase : Optional[int] = import_statement.split("""\n""" )
if len(_lowerCamelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowerCAmelCase : Dict = 2 if lines[1].strip() == """[""" else 1
_lowerCAmelCase : Tuple = [(i, _re_strip_line.search(_lowerCamelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        _lowerCAmelCase : Tuple = sort_objects(_lowerCamelCase , key=lambda x: x[1] )
_lowerCAmelCase : Optional[Any] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_lowerCamelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowerCAmelCase : str = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowerCAmelCase : Tuple = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : Dict = keys[:-1]
_lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + """, """.join([f'"{k}"' for k in sort_objects(_lowerCamelCase )] )
return "\n".join(_lowerCamelCase )
else:
# Finally we have to deal with imports fitting on one line
_lowerCAmelCase : Dict = _re_bracket_content.sub(_replace , _lowerCamelCase )
return import_statement
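# Example (illustrative) of the single-line case handled by the final branch:
#
#   _import_structure["models.bert"] = ["BertModel", "BERT_PRETRAINED_MODEL_ARCHIVE_LIST"]
#
# is rewritten by the bracket substitution to
#
#   _import_structure["models.bert"] = ["BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "BertModel"]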
def _UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]=True ) -> List[str]:
with open(_lowerCamelCase , """r""" ) as f:
_lowerCAmelCase : Optional[Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(
_lowerCamelCase , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_lowerCamelCase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowerCAmelCase : List[str] = main_blocks[block_idx]
_lowerCAmelCase : int = block.split("""\n""" )
# Get to the start of the imports.
_lowerCAmelCase : Any = 0
while line_idx < len(_lowerCamelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase )
else:
line_idx += 1
if line_idx >= len(_lowerCamelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowerCAmelCase : Any = """\n""".join(block_lines[line_idx:-1] )
_lowerCAmelCase : Tuple = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
_lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(_lowerCamelCase , indent_level=_lowerCamelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowerCAmelCase : List[Any] = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowerCAmelCase : Tuple = [(pattern.search(_lowerCamelCase ).groups()[0] if pattern.search(_lowerCamelCase ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowerCAmelCase : List[str] = [(i, key) for i, key in enumerate(_lowerCamelCase ) if key is not None]
        _lowerCAmelCase : List[str] = [x[0] for x in sorted(_lowerCamelCase , key=lambda x: x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : List[str] = []
for i in range(len(_lowerCamelCase ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
_lowerCAmelCase : Any = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(_lowerCamelCase )
count += 1
# And we put our main block back together with its first and last line.
_lowerCAmelCase : str = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(_lowerCamelCase ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(_lowerCamelCase , """w""" ) as f:
f.write("""\n""".join(_lowerCamelCase ) )
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any]=True ) -> Any:
_lowerCAmelCase : List[Any] = []
for root, _, files in os.walk(_lowerCamelCase ):
if "__init__.py" in files:
_lowerCAmelCase : List[Any] = sort_imports(os.path.join(_lowerCamelCase , """__init__.py""" ) , check_only=_lowerCamelCase )
if result:
_lowerCAmelCase : str = [os.path.join(_lowerCamelCase , """__init__.py""" )]
if len(_lowerCamelCase ) > 0:
raise ValueError(f'Would overwrite {len(_lowerCamelCase )} files, run `make style`.' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
UpperCamelCase_ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 309
| 0
|
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_UpperCamelCase : Optional[int] = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_UpperCamelCase : Any = concatenate_datasets
_UpperCamelCase : Dict = DownloadConfig
_UpperCamelCase : Dict = DownloadManager
_UpperCamelCase : Dict = DownloadMode
_UpperCamelCase : int = DownloadConfig
_UpperCamelCase : Union[str, Any] = DownloadMode
_UpperCamelCase : Dict = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 77
|
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
UpperCamelCase_ = logging.get_logger(__name__)
class a_ :
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[str] = question_encoder
_lowerCAmelCase : Optional[Any] = generator
_lowerCAmelCase : Optional[Any] = self.question_encoder
def __UpperCamelCase ( self , snake_case_ ):
if os.path.isfile(snake_case_ ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
_lowerCAmelCase : Any = os.path.join(snake_case_ , """question_encoder_tokenizer""" )
_lowerCAmelCase : Tuple = os.path.join(snake_case_ , """generator_tokenizer""" )
self.question_encoder.save_pretrained(snake_case_ )
self.generator.save_pretrained(snake_case_ )
@classmethod
def __UpperCamelCase ( cls , snake_case_ , **snake_case_ ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
_lowerCAmelCase : Dict = kwargs.pop("""config""" , snake_case_ )
if config is None:
_lowerCAmelCase : List[Any] = RagConfig.from_pretrained(snake_case_ )
_lowerCAmelCase : int = AutoTokenizer.from_pretrained(
snake_case_ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
_lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
snake_case_ , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=snake_case_ , generator=snake_case_ )
def __call__( self , *snake_case_ , **snake_case_ ):
return self.current_tokenizer(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
return self.generator.batch_decode(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
return self.generator.decode(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.question_encoder
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = self.generator
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = "longest" , snake_case_ = None , snake_case_ = True , **snake_case_ , ):
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , snake_case_ , )
if max_length is None:
_lowerCAmelCase : Any = self.current_tokenizer.model_max_length
_lowerCAmelCase : List[Any] = self(
snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , max_length=snake_case_ , padding=snake_case_ , truncation=snake_case_ , **snake_case_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
_lowerCAmelCase : List[str] = self.current_tokenizer.model_max_length
_lowerCAmelCase : List[str] = self(
text_target=snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , padding=snake_case_ , max_length=snake_case_ , truncation=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Dict = labels["""input_ids"""]
return model_inputs
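# Directory layout sketch (illustrative): save_pretrained above writes
#   <save_directory>/question_encoder_tokenizer/
#   <save_directory>/generator_tokenizer/
# and from_pretrained restores each half with AutoTokenizer plus the matching
# `subfolder` argument, so round-tripping only needs the root directory.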
| 309
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = """rwkv"""
__UpperCamelCase = {"""max_position_embeddings""": """context_length"""}
def __init__( self :Dict , lowercase_ :Tuple=5_02_77 , lowercase_ :Union[str, Any]=10_24 , lowercase_ :Union[str, Any]=40_96 , lowercase_ :Dict=32 , lowercase_ :Optional[int]=None , lowercase_ :int=None , lowercase_ :Any=1E-5 , lowercase_ :Union[str, Any]=0 , lowercase_ :List[str]=0 , lowercase_ :str=6 , lowercase_ :Optional[int]=False , lowercase_ :Tuple=True , **lowercase_ :Optional[int] , ) -> Dict:
UpperCAmelCase = vocab_size
UpperCAmelCase = context_length
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = attention_hidden_size if attention_hidden_size is not None else hidden_size
UpperCAmelCase = intermediate_size if intermediate_size is not None else 4 * hidden_size
UpperCAmelCase = layer_norm_epsilon
UpperCAmelCase = rescale_every
UpperCAmelCase = use_cache
UpperCAmelCase = bos_token_id
UpperCAmelCase = eos_token_id
super().__init__(
tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
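# Default wiring above (illustrative): leaving attention_hidden_size or
# intermediate_size as None makes them fall back to hidden_size and
# 4 * hidden_size, e.g. a config with hidden_size=1024 ends up with
# attention_hidden_size == 1024 and intermediate_size == 4096.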
| 78
|
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a) -> None:
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("""Sorted order is:""" , """ """.join(str(n) for n in a))
if __name__ == "__main__":
main()
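# Hedged usage sketch: pigeonhole_sort mutates its argument in place in
# O(n + value_range) time; the list below is an illustrative example.
# >>> data = [8, 3, 2, 7, 4, 6, 8]
# >>> pigeonhole_sort(data)
# >>> data
# [2, 3, 4, 6, 7, 8, 8]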
| 309
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowerCamelCase_ = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = '''albert'''
    def __init__( self , vocab_size=30000 , embedding_size=128 , hidden_size=4096 , num_hidden_layers=12 , num_hidden_groups=1 , num_attention_heads=64 , intermediate_size=16384 , inner_group_num=1 , hidden_act="gelu_new" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , classifier_dropout_prob=0.1 , position_embedding_type="absolute" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
@property
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 79
|
'''simple docstring'''
def solution(n: int = 1_00) -> int:
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(F'{solution() = }')
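# Worked check (illustrative): for n = 10 the square of the sum is 55**2 = 3025
# and the sum of the squares is 385, so solution(10) == 3025 - 385 == 2640.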
| 309
| 0
|
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowercase_ ( unittest.TestCase ):
def __a ( self ):
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(f'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 80
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = "openai/whisper-base"
__lowerCAmelCase = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__lowerCAmelCase = "transcriber"
__lowerCAmelCase = WhisperProcessor
__lowerCAmelCase = WhisperForConditionalGeneration
__lowerCAmelCase = ["audio"]
__lowerCAmelCase = ["text"]
def SCREAMING_SNAKE_CASE ( self , __A ) -> List[Any]:
return self.pre_processor(__A , return_tensors='''pt''' ).input_features
def SCREAMING_SNAKE_CASE ( self , __A ) -> Tuple:
return self.model.generate(inputs=__A )
def SCREAMING_SNAKE_CASE ( self , __A ) -> Optional[Any]:
        return self.pre_processor.batch_decode(__A , skip_special_tokens=True )[0]
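# Hedged usage sketch (hypothetical audio path; assumes the PipelineTool base
# makes the tool callable, chaining encode -> forward -> decode):
#   tool = __A()
#   transcript = tool("speech_sample.wav")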
| 81
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
UpperCamelCase_ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""DPTFeatureExtractor"""]
UpperCamelCase_ = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 0
|
def knapsack( weights , values , number_of_items , max_weight , index ):
    """simple docstring"""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights , values , number_of_items , max_weight , index + 1 )
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights , values , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans1 , ans2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
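    # Hedged usage sketch (illustrative numbers): with weights [1, 2, 4], values
    # [5, 30, 50], 3 items and capacity 6, the best subset is {2, 4} -> value 80.
    print(knapsack([1, 2, 4], [5, 30, 50], 3, 6, 0))  # --> 80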
| 82
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
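    # Hedged sketch: np.maximum broadcasts elementwise, so nested (2-D) input
    # works unchanged (illustrative values):
    print(np.array(relu([[-2.0, 3.0], [1.5, -0.5]])))  # --> [[0. 3.] [1.5 0.]]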
| 309
| 0
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowercase__ ( lowercase , unittest.TestCase ):
lowercase__ = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
    def get_dummy_inputs( self , seed=0 ):
        '''simple docstring'''
        generator = np.random.RandomState(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : Tuple = self.get_dummy_inputs()
_UpperCamelCase : Optional[Any] = pipe(**lowerCamelCase__ ).images
_UpperCamelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_UpperCamelCase : str = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
_UpperCamelCase : Dict = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : Dict = self.get_dummy_inputs()
_UpperCamelCase : str = pipe(**lowerCamelCase__ ).images
_UpperCamelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_UpperCamelCase : Dict = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_UpperCamelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
_UpperCamelCase : List[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : Tuple = self.get_dummy_inputs()
_UpperCamelCase : str = pipe(**lowerCamelCase__ ).images
_UpperCamelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_UpperCamelCase : List[str] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
_UpperCamelCase : str = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = self.get_dummy_inputs()
_UpperCamelCase : Union[str, Any] = pipe(**lowerCamelCase__ ).images
_UpperCamelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_UpperCamelCase : Dict = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
_UpperCamelCase : Tuple = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : Any = self.get_dummy_inputs()
_UpperCamelCase : List[Any] = pipe(**lowerCamelCase__ ).images
_UpperCamelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_UpperCamelCase : List[Any] = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
_UpperCamelCase : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : int = self.get_dummy_inputs()
_UpperCamelCase : Optional[int] = pipe(**lowerCamelCase__ ).images
_UpperCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_UpperCamelCase : Dict = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = self.get_dummy_inputs()
_UpperCamelCase : int = 3 * [inputs['prompt']]
# forward
_UpperCamelCase : Union[str, Any] = pipe(**lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = output.images[0, -3:, -3:, -1]
_UpperCamelCase : Dict = self.get_dummy_inputs()
_UpperCamelCase : Optional[int] = 3 * [inputs.pop('prompt' )]
_UpperCamelCase : Tuple = pipe.tokenizer(
lowerCamelCase__ ,padding='max_length' ,max_length=pipe.tokenizer.model_max_length ,truncation=lowerCamelCase__ ,return_tensors='np' ,)
_UpperCamelCase : Dict = text_inputs['input_ids']
_UpperCamelCase : Optional[Any] = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
_UpperCamelCase : Tuple = prompt_embeds
# forward
_UpperCamelCase : Dict = pipe(**lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : int = self.get_dummy_inputs()
_UpperCamelCase : Dict = 3 * ['this is a negative prompt']
_UpperCamelCase : Optional[int] = negative_prompt
_UpperCamelCase : Tuple = 3 * [inputs['prompt']]
# forward
_UpperCamelCase : Tuple = pipe(**lowerCamelCase__ )
_UpperCamelCase : int = output.images[0, -3:, -3:, -1]
_UpperCamelCase : List[Any] = self.get_dummy_inputs()
_UpperCamelCase : Optional[int] = 3 * [inputs.pop('prompt' )]
_UpperCamelCase : List[str] = []
for p in [prompt, negative_prompt]:
_UpperCamelCase : int = pipe.tokenizer(
lowerCamelCase__ ,padding='max_length' ,max_length=pipe.tokenizer.model_max_length ,truncation=lowerCamelCase__ ,return_tensors='np' ,)
_UpperCamelCase : Optional[Any] = text_inputs['input_ids']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
_UpperCamelCase , _UpperCamelCase : Optional[Any] = embeds
# forward
_UpperCamelCase : Dict = pipe(**lowerCamelCase__ )
_UpperCamelCase : int = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : Dict = ort.SessionOptions()
_UpperCamelCase : str = False
return options
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
# using the PNDM scheduler by default
_UpperCamelCase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=lowerCamelCase__ ,feature_extractor=lowerCamelCase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : List[Any] = 'A painting of a squirrel eating a burger'
np.random.seed(0 )
_UpperCamelCase : int = sd_pipe([prompt] ,guidance_scale=6.0 ,num_inference_steps=10 ,output_type='np' )
_UpperCamelCase : List[Any] = output.images
_UpperCamelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase : Tuple = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : List[Any] = DDIMScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' )
_UpperCamelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=lowerCamelCase__ ,safety_checker=lowerCamelCase__ ,feature_extractor=lowerCamelCase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : Any = 'open neural network exchange'
_UpperCamelCase : List[Any] = np.random.RandomState(0 )
_UpperCamelCase : List[Any] = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=lowerCamelCase__ ,output_type='np' )
_UpperCamelCase : Optional[Any] = output.images
_UpperCamelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase : Optional[int] = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_UpperCamelCase : Any = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' )
_UpperCamelCase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=lowerCamelCase__ ,safety_checker=lowerCamelCase__ ,feature_extractor=lowerCamelCase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : List[Any] = 'open neural network exchange'
_UpperCamelCase : Dict = np.random.RandomState(0 )
_UpperCamelCase : Union[str, Any] = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=lowerCamelCase__ ,output_type='np' )
_UpperCamelCase : Union[str, Any] = output.images
_UpperCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase : Optional[Any] = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_UpperCamelCase : Dict = 0
def test_callback_fn(lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : np.ndarray ) -> None:
_UpperCamelCase : Union[str, Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
_UpperCamelCase : Any = latents[0, -3:, -3:, -1]
_UpperCamelCase : Dict = np.array(
[-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
_UpperCamelCase : Union[str, Any] = latents[0, -3:, -3:, -1]
_UpperCamelCase : int = np.array(
[-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
_UpperCamelCase : Dict = False
_UpperCamelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,safety_checker=lowerCamelCase__ ,feature_extractor=lowerCamelCase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : Tuple = 'Andromeda galaxy in a bottle'
_UpperCamelCase : Optional[Any] = np.random.RandomState(0 )
pipe(
prompt=lowerCamelCase__ ,num_inference_steps=5 ,guidance_scale=7.5 ,generator=lowerCamelCase__ ,callback=lowerCamelCase__ ,callback_steps=1 ,)
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_UpperCamelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,safety_checker=lowerCamelCase__ ,feature_extractor=lowerCamelCase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
assert isinstance(lowerCamelCase__ ,lowerCamelCase__ )
assert pipe.safety_checker is None
_UpperCamelCase : List[Any] = pipe('example prompt' ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase__ )
_UpperCamelCase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(lowerCamelCase__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_UpperCamelCase : Union[str, Any] = pipe('example prompt' ,num_inference_steps=2 ).images[0]
assert image is not None
| 83
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class a_ (_a ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            """The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DonutImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 309
| 0
|
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def _get_tensors( self , length ):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores
    def test_list_criteria( self ):
        input_ids, scores = self._get_tensors(5 )
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_length_criteria( self ):
        criteria = MaxLengthCriteria(max_length=10 )
        input_ids, scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_new_tokens_criteria( self ):
        criteria = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
        input_ids, scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
        criteria_list = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )
    def test_max_time_criteria( self ):
        input_ids, scores = self._get_tensors(5 )
        criteria = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(input_ids , scores ) )
        criteria = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_validate_stopping_criteria( self ):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
        with self.assertWarns(UserWarning ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(stopping_criteria ) , 1 )
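# Hedged usage sketch outside the test suite (`model` and `input_ids` are
# hypothetical): the same criteria objects plug directly into generation:
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#   output = model.generate(input_ids, stopping_criteria=criteria)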
| 84
|
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class a_ (_a ):
__lowerCAmelCase : Dict = (DPMSolverSDEScheduler,)
__lowerCAmelCase : Dict = 1_0
    def get_scheduler_config( self , **snake_case_ ):
        config = {
            """num_train_timesteps""": 1_1_0_0,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """noise_sampler_seed""": 0,
        }
        config.update(**snake_case_ )
        return config
def __UpperCamelCase ( self ):
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def __UpperCamelCase ( self ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=snake_case_ , beta_end=snake_case_ )
def __UpperCamelCase ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=snake_case_ )
def __UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.scheduler_classes[0]
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : Any = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : Tuple = self.dummy_model()
_lowerCAmelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : Optional[Any] = sample.to(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Union[str, Any] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : Union[str, Any] = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = output.prev_sample
_lowerCAmelCase : List[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1E-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.scheduler_classes[0]
_lowerCAmelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
_lowerCAmelCase : Dict = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : int = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : int = sample.to(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : List[str] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : List[Any] = model(snake_case_ , snake_case_ )
_lowerCAmelCase : str = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : int = output.prev_sample
_lowerCAmelCase : str = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Optional[int] = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1E-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1E-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1E-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : str = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case_ )
_lowerCAmelCase : Tuple = self.dummy_model()
_lowerCAmelCase : Optional[int] = self.dummy_sample_deter.to(snake_case_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_lowerCAmelCase : str = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = output.prev_sample
_lowerCAmelCase : List[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1E-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = self.scheduler_classes[0]
_lowerCAmelCase : Optional[int] = self.get_scheduler_config()
_lowerCAmelCase : Tuple = scheduler_class(**snake_case_ , use_karras_sigmas=snake_case_ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case_ )
_lowerCAmelCase : List[Any] = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter.to(snake_case_ ) * scheduler.init_noise_sigma
_lowerCAmelCase : Optional[int] = sample.to(snake_case_ )
for t in scheduler.timesteps:
_lowerCAmelCase : List[str] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : int = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[int] = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : str = output.prev_sample
_lowerCAmelCase : Optional[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
| 309
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Any = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _snake_case ( lowercase_ , lowercase_ ):
lowerCAmelCase_ : Tuple = "swin"
lowerCAmelCase_ : Dict = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.0_2 , layer_norm_eps=1e-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ["stem"] + [F'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : str = version.parse("1.11" )
@property
def lowerCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowerCAmelCase__ ( self ) -> float:
'''simple docstring'''
return 1e-4
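# Hedged check: with the defaults (embed_dim=96, depths of length 4), the
# derived hidden_size is 96 * 2 ** 3 = 768, the channel width after the last stage.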
| 85
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"""vocab_file""": """vocab.txt"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
UpperCamelCase_ = {
"""YituTech/conv-bert-base""": 5_12,
"""YituTech/conv-bert-medium-small""": 5_12,
"""YituTech/conv-bert-small""": 5_12,
}
UpperCamelCase_ = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class a_ (_a ):
__lowerCAmelCase : Any = VOCAB_FILES_NAMES
__lowerCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Optional[int] = ConvBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
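# Hedged sketch of the resulting layouts: a single sequence becomes
# [CLS] A [SEP] with token type ids all 0; a pair becomes [CLS] A [SEP] B [SEP]
# with type id 1 covering the second segment and its trailing [SEP].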
| 309
| 0
|
"""simple docstring"""
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    a , b = 0, 1
    while True:
        a , b = b, a + b
        yield b
def solution(n = 1000 ):
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen ) ) ) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
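# Worked check (illustrative): the first generated term with 3 digits is 144,
# the 12th Fibonacci number, so solution(3) == 12.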
| 86
|
'''simple docstring'''
import copy
import os
import cv2 as cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__( self ):
        self.img = """"""
        self.original_image = """"""
        self.last_list = []
        self.rem = 0
        self.L = 2_5_6
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch( self , input_image ):
        self.img = cva.imread(input_image , 0 )
        self.original_image = copy.deepcopy(self.img )
        x , _ , _ = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="""x""" )
        self.k = np.sum(x )
        for i in range(len(x ) ):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last )
            last = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(last )
            self.number_of_rows = int(np.ma.count(self.img ) / self.img[1].size )
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite("""output_data/output.jpg""" , self.img )
    def plot_histogram( self ):
        plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
    def show_image( self ):
        cva.imshow("""Output-Image""" , self.img )
        cva.imshow("""Input-Image""" , self.original_image )
        cva.waitKey(5_0_0_0 )
        cva.destroyAllWindows()
if __name__ == "__main__":
UpperCamelCase_ = os.path.join(os.path.basename(__file__), """image_data/input.jpg""")
UpperCamelCase_ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 309
| 0
|
def valid_connection(graph: list[list[int]] , next_ver: int , curr_ind: int , path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)
def util_hamilton_cycle(graph: list[list[int]] , path: list[int] , curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0 , len(graph)):
        if valid_connection(graph , next_ver , curr_ind , path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph , path , curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def hamilton_cycle(graph: list[list[int]] , start_index: int = 0) -> list[int]:
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph , path , 1) else []
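# Hedged usage sketch (illustrative graph): a 4-cycle 0-1-2-3-0 given as an
# adjacency matrix yields the Hamiltonian cycle [0, 1, 2, 3, 0]:
# >>> hamilton_cycle([[0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0]])
# [0, 1, 2, 3, 0]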
| 87
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase_ = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 0
|
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict( TypedDict ):
    '''simple docstring'''
    bwt_string: str
    idx_original_string: int
def all_rotations( s ):
    '''simple docstring'''
    if not isinstance(s, str ):
        raise TypeError("""The parameter s type must be str.""" )
    return [s[i:] + s[:i] for i in range(len(s ) )]
def bwt_transform( s ):
    '''simple docstring'''
    if not isinstance(s, str ):
        raise TypeError("""The parameter s type must be str.""" )
    if not s:
        raise ValueError("""The parameter s must not be empty.""" )
    rotations = all_rotations(s )
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s ),
    }
    return response
def reverse_bwt( bwt_string, idx_original_string ):
    '''simple docstring'''
    if not isinstance(bwt_string, str ):
        raise TypeError("""The parameter bwt_string type must be str.""" )
    if not bwt_string:
        raise ValueError("""The parameter bwt_string must not be empty.""" )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or"""
            """ castable to int.""" )
    if idx_original_string < 0:
        raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            """The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
    ordered_rotations = [""""""] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
__lowerCAmelCase : Tuple = 'Provide a string that I will generate its BWT transform: '
__lowerCAmelCase : str = input(entry_msg).strip()
__lowerCAmelCase : Dict = bwt_transform(s)
print(
F'''Burrows Wheeler transform for string \'{s}\' results '''
F'''in \'{result["bwt_string"]}\''''
)
__lowerCAmelCase : Optional[Any] = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
F'''Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '''
F'''we get original string \'{original_string}\''''
)
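# Worked check (illustrative): for s = "banana" the sorted rotations are
# ["abanan", "anaban", "ananab", "banana", "nabana", "nanaba"], so
# bwt_transform("banana") == {"bwt_string": "nnbaaa", "idx_original_string": 3}
# and reverse_bwt("nnbaaa", 3) == "banana".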
| 88
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
UpperCamelCase_ = 0
UpperCamelCase_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
UpperCamelCase_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
UpperCamelCase_ = tuple[int, int]
class a_ :
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_lowerCAmelCase : Optional[int] = pos_x
_lowerCAmelCase : List[str] = pos_y
_lowerCAmelCase : Tuple = (pos_y, pos_x)
_lowerCAmelCase : List[Any] = goal_x
_lowerCAmelCase : int = goal_y
_lowerCAmelCase : Union[str, Any] = g_cost
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : List[Any] = self.calculate_heuristic()
_lowerCAmelCase : Optional[int] = self.g_cost + self.h_cost
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.pos_x - self.goal_x
_lowerCAmelCase : Optional[int] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(snake_case_ ) + abs(snake_case_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , snake_case_ ):
return self.f_cost < other.f_cost
class a_ :
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : Optional[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , snake_case_ )
_lowerCAmelCase : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , snake_case_ )
_lowerCAmelCase : List[str] = [self.start]
_lowerCAmelCase : list[Node] = []
_lowerCAmelCase : List[str] = False
def __UpperCamelCase ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_lowerCAmelCase : Optional[int] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(snake_case_ )
self.closed_nodes.append(snake_case_ )
_lowerCAmelCase : Optional[int] = self.get_successors(snake_case_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(snake_case_ )
else:
# retrieve the best current path
_lowerCAmelCase : Optional[Any] = self.open_nodes.pop(self.open_nodes.index(snake_case_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(snake_case_ )
else:
self.open_nodes.append(snake_case_ )
return [self.start.pos]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Union[str, Any] = []
for action in delta:
_lowerCAmelCase : Union[str, Any] = parent.pos_x + action[1]
_lowerCAmelCase : Dict = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(snake_case_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
snake_case_ , snake_case_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , snake_case_ , ) )
return successors
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[Any] = node
_lowerCAmelCase : Optional[Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_lowerCAmelCase : Optional[int] = current_node.parent
path.reverse()
return path
class a_ :
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[str] = AStar(snake_case_ , snake_case_ )
_lowerCAmelCase : int = AStar(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[int] = False
def __UpperCamelCase ( self ):
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
_lowerCAmelCase : Tuple = self.fwd_astar.open_nodes.pop(0 )
_lowerCAmelCase : Optional[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
snake_case_ , snake_case_ )
self.fwd_astar.closed_nodes.append(snake_case_ )
self.bwd_astar.closed_nodes.append(snake_case_ )
_lowerCAmelCase : List[str] = current_bwd_node
_lowerCAmelCase : Dict = current_fwd_node
_lowerCAmelCase : Any = {
self.fwd_astar: self.fwd_astar.get_successors(snake_case_ ),
self.bwd_astar: self.bwd_astar.get_successors(snake_case_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(snake_case_ )
else:
# retrieve the best current path
_lowerCAmelCase : List[Any] = astar.open_nodes.pop(
astar.open_nodes.index(snake_case_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(snake_case_ )
else:
astar.open_nodes.append(snake_case_ )
return [self.fwd_astar.start.pos]
def __UpperCamelCase ( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : int = self.fwd_astar.retrace_path(snake_case_ )
_lowerCAmelCase : Optional[Any] = self.bwd_astar.retrace_path(snake_case_ )
bwd_path.pop()
bwd_path.reverse()
_lowerCAmelCase : Dict = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
UpperCamelCase_ = (0, 0)
UpperCamelCase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
UpperCamelCase_ = time.time()
UpperCamelCase_ = AStar(init, goal)
UpperCamelCase_ = a_star.search()
UpperCamelCase_ = time.time() - start_time
print(F'AStar execution time = {end_time:f} seconds')
UpperCamelCase_ = time.time()
UpperCamelCase_ = BidirectionalAStar(init, goal)
UpperCamelCase_ = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
| 309
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
__lowerCAmelCase = {
'''microsoft/table-transformer-detection''': (
'''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
),
}
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : List[Any] = 'table-transformer'
lowerCAmelCase : str = ['past_key_values']
lowerCAmelCase : Any = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs ,):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
            elif isinstance(backbone_config ,dict ):
                backbone_model_type = backbone_config.get('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            dilation , backbone , use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder ,**kwargs )
@property
def __lowercase ( self : Tuple ):
return self.encoder_attention_heads
@property
def __lowercase ( self : Tuple ):
return self.d_model
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : int = version.parse('1.11' )
@property
def __lowercase ( self : int ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def __lowercase ( self : Tuple ):
return 1E-5
@property
def __lowercase ( self : Tuple ):
return 12
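# --- Usage sketch (added; not part of the original file) ---
# A minimal example of how a config like the one above is typically exercised.
# Assumption: the upstream transformers name for the first class above is
# TableTransformerConfig; the attribute_map declared in the class body routes
# hidden_size -> d_model, which is what the assert below checks.
from transformers import TableTransformerConfig

_cfg = TableTransformerConfig(d_model=256, encoder_layers=6)
assert _cfg.hidden_size == 256  # resolved through attribute_map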
| 89
|
'''simple docstring'''
def _UpperCAmelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : str ) -> list[int]:
_lowerCAmelCase : List[Any] = int(_lowerCamelCase )
# Initialize Result
_lowerCAmelCase : Any = []
# Traverse through all denominations, largest first
for denomination in reversed(_lowerCamelCase ):
# Find denominations
while int(_lowerCamelCase ) >= int(_lowerCamelCase ):
total_value -= int(_lowerCamelCase )
answer.append(_lowerCamelCase ) # Append the denomination to the answer list
return answer
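# Reference sketch (added): the same greedy routine with readable, hypothetical
# names, since the masked identifiers above are not runnable as-is. Assumes the
# denominations are given in ascending order, as in the driver code below.
def _greedy_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    answer: list[int] = []
    for denomination in reversed(denominations):  # largest denomination first
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)
    return answer


# Worked example: change for 987 with the Indian currency denominations below.
assert _greedy_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987") == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2,
]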
# Driver Code
if __name__ == "__main__":
UpperCamelCase_ = []
UpperCamelCase_ = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
UpperCamelCase_ = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'Denomination {i}: ').strip()))
UpperCamelCase_ = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCamelCase_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
UpperCamelCase_ = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'Following is minimal change for {value}: ')
UpperCamelCase_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 309
| 0
|
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
__A = False
__A = False
def lowerCamelCase_ ( UpperCamelCase__ : Namespace ) -> Optional[int]:
"""simple docstring"""
return TrainCommand(UpperCamelCase__ )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
@staticmethod
def lowercase_ ( lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=lowerCamelCase__ , required=lowerCamelCase__ , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=lowerCamelCase__ , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=lowerCamelCase__ , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=lowerCamelCase__ , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=lowerCamelCase__ , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=lowerCamelCase__ , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=lowerCamelCase__ , default='./' , help='path where the trained model will be saved.' )
train_parser.add_argument(
'--task' , type=lowerCamelCase__ , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=lowerCamelCase__ , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=lowerCamelCase__ , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=lowerCamelCase__ , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=lowerCamelCase__ , default=3e-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=lowerCamelCase__ , default=1e-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=lowerCamelCase__ )
def __init__( self , lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = logging.get_logger('transformers-cli/training' )
__lowerCamelCase = 'tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=lowerCamelCase__ )
__lowerCamelCase = args.output
__lowerCamelCase = args.column_label
__lowerCamelCase = args.column_text
__lowerCamelCase = args.column_id
self.logger.info(f"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
__lowerCamelCase = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"""Loading dataset from {args.train_data}""" )
__lowerCamelCase = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__lowerCamelCase = None
if args.validation_data:
self.logger.info(f"""Loading validation dataset from {args.validation_data}""" )
__lowerCamelCase = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__lowerCamelCase = args.validation_split
__lowerCamelCase = args.train_batch_size
__lowerCamelCase = args.valid_batch_size
__lowerCamelCase = args.learning_rate
__lowerCamelCase = args.adam_epsilon
def lowercase_ ( self ) -> int:
'''simple docstring'''
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
raise NotImplementedError
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
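# Example invocation sketch (added; hypothetical paths, flags as registered above):
#   transformers-cli train \
#       --train_data ./data/train.csv --column_label 0 --column_text 1 \
#       --task text_classification --model bert-base-uncased \
#       --train_batch_size 32 --learning_rate 3e-5 --output ./out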
| 90
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 0
|
"""simple docstring"""
from typing import Any
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Any , lowercase_ : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = data
SCREAMING_SNAKE_CASE_ : int = None
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = None
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = self.head
while temp is not None:
print(temp.data , end=''' ''')
SCREAMING_SNAKE_CASE_ : Optional[Any] = temp.next
print()
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = Node(lowercase_)
SCREAMING_SNAKE_CASE_ : int = self.head
SCREAMING_SNAKE_CASE_ : List[str] = new_node
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple):
'''simple docstring'''
if node_data_a == node_data_a:
return
else:
SCREAMING_SNAKE_CASE_ : List[Any] = self.head
while node_a is not None and node_a.data != node_data_a:
SCREAMING_SNAKE_CASE_ : List[Any] = node_a.next
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.head
while node_a is not None and node_a.data != node_data_a:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = node_a.next
if node_a is None or node_a is None:
return
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = node_a.data, node_a.data
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("""After swapping""")
ll.print_list()
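# Note (added): swap_nodes above exchanges the *data* payloads of the two nodes
# rather than relinking pointers, which keeps the swap itself O(1) after the two
# O(n) searches and sidesteps edge cases around the head of the list.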
| 91
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = """laion/clap-htsat-unfused"""
_lowerCAmelCase : int = tempfile.mkdtemp()
def __UpperCamelCase ( self , **snake_case_ ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self , **snake_case_ ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = self.get_feature_extractor()
_lowerCAmelCase : Union[str, Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase : int = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
_lowerCAmelCase : Dict = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_feature_extractor()
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Union[str, Any] = floats_list((3, 1_0_0_0) )
_lowerCAmelCase : List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
_lowerCAmelCase : Optional[Any] = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_feature_extractor()
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Tuple = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Union[str, Any] = """This is a test string"""
_lowerCAmelCase : Union[str, Any] = processor(text=snake_case_ )
_lowerCAmelCase : Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = self.get_feature_extractor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[Any] = processor.batch_decode(snake_case_ )
_lowerCAmelCase : Dict = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.get_feature_extractor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 309
| 0
|
from jiwer import compute_measures
import datasets
UpperCamelCase__ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
UpperCamelCase__ = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. The relationship is also studied through the power law, which states a correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system, with a WER of 0 being a perfect score.
"""
UpperCamelCase__ = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
def __SCREAMING_SNAKE_CASE( self , _A=None , _A=None , _A=False ):
"""simple docstring"""
if concatenate_texts:
return compute_measures(_A , _A )["wer"]
else:
__lowerCAmelCase = 0
__lowerCAmelCase = 0
for prediction, reference in zip(_A , _A ):
__lowerCAmelCase = compute_measures(_A , _A )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
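# Worked example (added) of the WER formula above, computed by hand, no jiwer needed:
# reference  = "this is the reference"   (N = 4 words)
# prediction = "this is the prediction"  (one substitution, nothing else)
# WER = (S + D + I) / N = (1 + 0 + 0) / 4 = 0.25
_s, _d, _i, _n = 1, 0, 0, 4
assert (_s + _d + _i) / _n == 0.25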
| 92
|
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
UpperCamelCase_ = {
"""facebook/m2m100_418M""": 10_24,
}
# fmt: off
UpperCamelCase_ = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class a_ (_a ):
__lowerCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = ["""input_ids""", """attention_mask"""]
__lowerCAmelCase : List[int] = []
__lowerCAmelCase : List[int] = []
def __init__( self , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<pad>" , snake_case_="<unk>" , snake_case_="m2m100" , snake_case_ = None , snake_case_=8 , **snake_case_ , ):
_lowerCAmelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCAmelCase : Optional[Any] = language_codes
_lowerCAmelCase : Tuple = FAIRSEQ_LANGUAGE_CODES[language_codes]
_lowerCAmelCase : str = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
_lowerCAmelCase : int = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(snake_case_ )
for lang_code in fairseq_language_code
if self.get_lang_token(snake_case_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=snake_case_ , tgt_lang=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , language_codes=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Optional[int] = vocab_file
_lowerCAmelCase : Any = load_json(snake_case_ )
_lowerCAmelCase : str = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Union[str, Any] = spm_file
_lowerCAmelCase : Tuple = load_spm(snake_case_ , self.sp_model_kwargs )
_lowerCAmelCase : int = len(self.encoder )
_lowerCAmelCase : Union[str, Any] = {
self.get_lang_token(snake_case_ ): self.encoder_size + i for i, lang_code in enumerate(snake_case_ )
}
_lowerCAmelCase : List[str] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(snake_case_ )}
_lowerCAmelCase : Optional[Any] = {v: k for k, v in self.lang_token_to_id.items()}
_lowerCAmelCase : Any = src_lang if src_lang is not None else """en"""
_lowerCAmelCase : Optional[int] = tgt_lang
_lowerCAmelCase : Tuple = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_lowerCAmelCase : List[Any] = num_madeup_words
@property
def __UpperCamelCase ( self ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __UpperCamelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase ( self , snake_case_ ):
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(snake_case_ , self.encoder[self.unk_token] )
def __UpperCamelCase ( self , snake_case_ ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(snake_case_ , self.unk_token )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = []
_lowerCAmelCase : Optional[int] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case_ ) + token
_lowerCAmelCase : Optional[Any] = []
else:
current_sub_tokens.append(snake_case_ )
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
_lowerCAmelCase : List[Any] = [1] * len(self.prefix_tokens )
_lowerCAmelCase : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case_ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case_ )) + ([0] * len(snake_case_ )) + suffix_ones
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_lowerCAmelCase : int = self.__dict__.copy()
_lowerCAmelCase : str = None
return state
def __setstate__( self , snake_case_ ):
_lowerCAmelCase : List[str] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : str = {}
_lowerCAmelCase : str = load_spm(self.spm_file , self.sp_model_kwargs )
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Dict = Path(snake_case_ )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , snake_case_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , snake_case_ )
elif not os.path.isfile(self.spm_file ):
with open(snake_case_ , """wb""" ) as fi:
_lowerCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (str(snake_case_ ), str(snake_case_ ))
def __UpperCamelCase ( self , snake_case_ , snake_case_ = "en" , snake_case_ = None , snake_case_ = "ro" , **snake_case_ , ):
_lowerCAmelCase : Union[str, Any] = src_lang
_lowerCAmelCase : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(snake_case_ , snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_lowerCAmelCase : Dict = src_lang
_lowerCAmelCase : str = self(snake_case_ , add_special_tokens=snake_case_ , **snake_case_ )
_lowerCAmelCase : Union[str, Any] = self.get_lang_id(snake_case_ )
_lowerCAmelCase : Tuple = tgt_lang_id
return inputs
def __UpperCamelCase ( self ):
self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase ( self ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Optional[Any] = self.get_lang_token(snake_case_ )
_lowerCAmelCase : List[Any] = self.lang_token_to_id[lang_token]
_lowerCAmelCase : Any = [self.cur_lang_id]
_lowerCAmelCase : Any = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = self.get_lang_token(snake_case_ )
_lowerCAmelCase : int = self.lang_token_to_id[lang_token]
_lowerCAmelCase : str = [self.cur_lang_id]
_lowerCAmelCase : str = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
return self.lang_code_to_token[lang]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[str] = self.get_lang_token(snake_case_ )
return self.lang_token_to_id[lang_token]
def _UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
_lowerCAmelCase : Optional[Any] = sentencepiece.SentencePieceProcessor(**_lowerCamelCase )
spm.Load(str(_lowerCamelCase ) )
return spm
def _UpperCAmelCase ( _lowerCamelCase : str ) -> Union[Dict, List]:
with open(_lowerCamelCase , """r""" ) as f:
return json.load(_lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : str ) -> None:
with open(_lowerCamelCase , """w""" ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase , indent=2 )
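# Usage sketch (added; assumptions: the upstream class name is M2M100Tokenizer and
# the public facebook/m2m100_418M checkpoint is reachable; downloading it requires
# network access, so the sketch is left commented out):
# from transformers import M2M100Tokenizer
# tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
# batch = tok("Hello world", return_tensors="pt")   # prefixed with the __en__ language token
# fr_id = tok.get_lang_id("fr")                     # forced BOS id when generating French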
| 309
| 0
|
'''simple docstring'''
from math import ceil
def snake_case_ ( __SCREAMING_SNAKE_CASE : int = 1001 ):
"""simple docstring"""
lowercase_ : Tuple = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
lowercase_ : Optional[Any] = 2 * i + 1
lowercase_ : Any = 2 * i
lowercase_ : List[str] = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
_lowercase : Optional[int] = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number")
| 93
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _UpperCAmelCase ( _lowerCamelCase : Callable , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ) -> np.ndarray:
_lowerCAmelCase : Union[str, Any] = int(np.ceil((x_end - xa) / step_size ) )
_lowerCAmelCase : Tuple = np.zeros((n + 1,) )
_lowerCAmelCase : List[Any] = ya
_lowerCAmelCase : int = xa
for k in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = y[k] + step_size * ode_func(_lowerCamelCase , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
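# Worked example (added) of the explicit Euler update y_{k+1} = y_k + h * f(x_k, y_k):
# for y' = y with y(0) = 1 and step size h = 0.5, two steps give
#   y_1 = 1.0 + 0.5 * 1.0 = 1.5
#   y_2 = 1.5 + 0.5 * 1.5 = 2.25
_ys, _h = [1.0], 0.5
for _ in range(2):
    _ys.append(_ys[-1] + _h * _ys[-1])  # f(x, y) = y
assert _ys == [1.0, 1.5, 2.25]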
| 309
| 0
|
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class _snake_case ( _snake_case ):
SCREAMING_SNAKE_CASE__ = ['image_processor']
SCREAMING_SNAKE_CASE__ = 'SamImageProcessor'
def __init__( self , _lowerCamelCase ):
super().__init__(_lowerCamelCase )
a :Dict = self.image_processor
a :str = -10
a :List[str] = self.image_processor.size['''longest_edge''']
def __call__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase = None , **_lowerCamelCase , ):
a :List[Any] = self.image_processor(
_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase , )
# pop arguments that are not used in the forward pass but are used nevertheless
a :Any = encoding_image_processor['''original_sizes''']
if hasattr(_lowerCamelCase , '''numpy''' ): # Checks if Torch or TF tensor
a :Union[str, Any] = original_sizes.numpy()
a , a , a :Optional[Any] = self._check_and_preprocess_points(
input_points=_lowerCamelCase , input_labels=_lowerCamelCase , input_boxes=_lowerCamelCase , )
a :Optional[Any] = self._normalize_and_convert(
_lowerCamelCase , _lowerCamelCase , input_points=_lowerCamelCase , input_labels=_lowerCamelCase , input_boxes=_lowerCamelCase , return_tensors=_lowerCamelCase , )
return encoding_image_processor
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="pt" , ):
if input_points is not None:
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
a :Tuple = [
self._normalize_coordinates(self.target_size , _lowerCamelCase , original_sizes[0] ) for point in input_points
]
else:
a :str = [
self._normalize_coordinates(self.target_size , _lowerCamelCase , _lowerCamelCase )
for point, original_size in zip(_lowerCamelCase , _lowerCamelCase )
]
# if the per-image point arrays differ in shape, pad them (and their labels) to a common size
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
a , a :Tuple = self._pad_points_and_labels(_lowerCamelCase , _lowerCamelCase )
a :List[str] = np.array(_lowerCamelCase )
if input_labels is not None:
a :Tuple = np.array(_lowerCamelCase )
if input_boxes is not None:
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
a :Dict = [
self._normalize_coordinates(self.target_size , _lowerCamelCase , original_sizes[0] , is_bounding_box=_lowerCamelCase )
for box in input_boxes
]
else:
a :str = [
self._normalize_coordinates(self.target_size , _lowerCamelCase , _lowerCamelCase , is_bounding_box=_lowerCamelCase )
for box, original_size in zip(_lowerCamelCase , _lowerCamelCase )
]
a :Union[str, Any] = np.array(_lowerCamelCase )
if input_boxes is not None:
if return_tensors == "pt":
a :Optional[Any] = torch.from_numpy(_lowerCamelCase )
# boxes batch size of 1 by default
a :Optional[Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
a :Dict = tf.convert_to_tensor(_lowerCamelCase )
# boxes batch size of 1 by default
a :Tuple = tf.expand_dims(_lowerCamelCase , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'''input_boxes''': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
a :List[Any] = torch.from_numpy(_lowerCamelCase )
# point batch size of 1 by default
a :Any = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
a :Tuple = tf.convert_to_tensor(_lowerCamelCase )
# point batch size of 1 by default
a :int = tf.expand_dims(_lowerCamelCase , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'''input_points''': input_points} )
if input_labels is not None:
if return_tensors == "pt":
a :Dict = torch.from_numpy(_lowerCamelCase )
# point batch size of 1 by default
a :int = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
a :Union[str, Any] = tf.convert_to_tensor(_lowerCamelCase )
# point batch size of 1 by default
a :Dict = tf.expand_dims(_lowerCamelCase , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'''input_labels''': input_labels} )
return encoding_image_processor
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ):
a :List[Any] = max([point.shape[0] for point in input_points] )
a :Union[str, Any] = []
for i, point in enumerate(_lowerCamelCase ):
if point.shape[0] != expected_nb_points:
a :int = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
a :Union[str, Any] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(_lowerCamelCase )
a :Any = processed_input_points
return input_points, input_labels
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
a , a :str = original_size
a , a :Optional[Any] = self.image_processor._get_preprocess_shape(_lowerCamelCase , longest_edge=_lowerCamelCase )
a :List[Any] = deepcopy(_lowerCamelCase ).astype(_lowerCamelCase )
if is_bounding_box:
a :Dict = coords.reshape(-1 , 2 , 2 )
a :Tuple = coords[..., 0] * (new_w / old_w)
a :Dict = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
a :Any = coords.reshape(-1 , 4 )
return coords
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , ):
if input_points is not None:
if hasattr(_lowerCamelCase , '''numpy''' ): # Checks for TF or Torch tensor
a :int = input_points.numpy().tolist()
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not isinstance(input_points[0] , _lowerCamelCase ):
raise ValueError('''Input points must be a list of list of floating points.''' )
a :Dict = [np.array(_lowerCamelCase ) for input_point in input_points]
else:
a :List[str] = None
if input_labels is not None:
if hasattr(_lowerCamelCase , '''numpy''' ):
a :Optional[int] = input_labels.numpy().tolist()
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not isinstance(input_labels[0] , _lowerCamelCase ):
raise ValueError('''Input labels must be a list of list integers.''' )
a :List[Any] = [np.array(_lowerCamelCase ) for label in input_labels]
else:
a :Optional[Any] = None
if input_boxes is not None:
if hasattr(_lowerCamelCase , '''numpy''' ):
a :int = input_boxes.numpy().tolist()
if (
not isinstance(_lowerCamelCase , _lowerCamelCase )
or not isinstance(input_boxes[0] , _lowerCamelCase )
or not isinstance(input_boxes[0][0] , _lowerCamelCase )
):
raise ValueError('''Input boxes must be a list of list of list of floating points.''' )
a :Optional[int] = [np.array(_lowerCamelCase ).astype(np.floataa ) for box in input_boxes]
else:
a :Optional[int] = None
return input_points, input_labels, input_boxes
@property
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE__ ( self , *_lowerCamelCase , **_lowerCamelCase ):
return self.image_processor.post_process_masks(*_lowerCamelCase , **_lowerCamelCase )
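# The coordinate rescaling in _normalize_coordinates above reduces to a per-axis
# scale from the original image size to the resized (longest-edge) size. A
# standalone sketch (added) of that math with hypothetical sizes:
old_h, old_w = 384, 512
new_h, new_w = 768, 1024  # longest edge rescaled to 1024 (a factor of exactly 2)
x, y = 320.0, 240.0       # a point in original-image coordinates
assert (x * (new_w / old_w), y * (new_h / old_h)) == (640.0, 480.0)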
| 94
|
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def _UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] ) -> Union[str, Any]:
# ===== initialization =====
_lowerCAmelCase : Tuple = Mock()
_lowerCAmelCase : Any = conn, Mock()
_lowerCAmelCase : Optional[Any] = iter([1, None] )
_lowerCAmelCase : str = lambda _lowerCamelCase : next(_lowerCamelCase )
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=_lowerCamelCase )
# ===== verification =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 309
| 0
|
def _A ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
while a != 0:
a__ , a__ : Optional[Any] =b % a, a
return b
def _A ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if gcd(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) != 1:
a__ : Optional[Any] =f'''mod inverse of {a!r} and {m!r} does not exist'''
raise ValueError(SCREAMING_SNAKE_CASE )
a__ , a__ , a__ : Dict =1, 0, a
a__ , a__ , a__ : List[Any] =0, 1, m
while va != 0:
a__ : str =ua // va
a__ , a__ , a__ , a__ , a__ , a__ : Dict =(ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
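# Worked example (added): gcd(3, 7) == 1, so the modular inverse of 3 modulo 7
# exists; it is 5, since 3 * 5 = 15 ≡ 1 (mod 7).
assert (3 * 5) % 7 == 1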
| 95
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a_ :
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=3_0 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1_0 , snake_case_=0.02 , snake_case_=None , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : Tuple = image_size
_lowerCAmelCase : int = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : str = is_training
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : str = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : Any = type_sequence_label_size
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : Optional[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : List[Any] = (image_size // patch_size) ** 2
_lowerCAmelCase : Dict = num_patches + 1
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[Any] = ViTMSNModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Tuple = self.type_sequence_label_size
_lowerCAmelCase : int = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[int] = model(snake_case_ , labels=snake_case_ )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase : int = 1
_lowerCAmelCase : List[str] = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[int] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = config_and_inputs
_lowerCAmelCase : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ (_a , _a , unittest.TestCase ):
__lowerCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__lowerCAmelCase : Optional[int] = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase : Dict = False
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : Any = False
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = ViTMSNModelTester(self )
_lowerCAmelCase : int = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(snake_case_ )
_lowerCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def __UpperCamelCase ( self ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[int] = ViTMSNModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def _UpperCAmelCase ( ) -> Tuple:
_lowerCAmelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a_ (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self ):
torch.manual_seed(2 )
_lowerCAmelCase : Dict = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(snake_case_ )
_lowerCAmelCase : Dict = self.default_image_processor
_lowerCAmelCase : Any = prepare_img()
_lowerCAmelCase : List[str] = image_processor(images=snake_case_ , return_tensors="""pt""" ).to(snake_case_ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Dict = model(**snake_case_ )
# verify the logits
_lowerCAmelCase : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case_ )
_lowerCAmelCase : Tuple = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) )
| 309
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowercase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
def __init__( self , *lowercase , **lowercase ):
warnings.warn(
'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use LayoutLMv2ImageProcessor instead.' , lowercase , )
super().__init__(*lowercase , **lowercase )
| 96
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class a_ (_a ):
__lowerCAmelCase : List[Any] = """microsoft/speecht5_tts"""
__lowerCAmelCase : List[Any] = (
"""This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
"""text to read (in English) and returns a waveform object containing the sound."""
)
__lowerCAmelCase : List[str] = """text_reader"""
__lowerCAmelCase : Optional[Any] = SpeechTaProcessor
__lowerCAmelCase : str = SpeechTaForTextToSpeech
__lowerCAmelCase : int = SpeechTaHifiGan
__lowerCAmelCase : int = ["""text"""]
__lowerCAmelCase : int = ["""audio"""]
def __UpperCamelCase ( self ):
if self.post_processor is None:
_lowerCAmelCase : int = """microsoft/speecht5_hifigan"""
super().setup()
def __UpperCamelCase ( self , snake_case_ , snake_case_=None ):
_lowerCAmelCase : Tuple = self.pre_processor(text=snake_case_ , return_tensors="""pt""" , truncation=snake_case_ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
_lowerCAmelCase : List[str] = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
_lowerCAmelCase : Any = torch.tensor(embeddings_dataset[7_3_0_5]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __UpperCamelCase ( self , snake_case_ ):
with torch.no_grad():
return self.model.generate_speech(**snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
with torch.no_grad():
return self.post_processor(snake_case_ ).cpu().detach()
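# End-to-end sketch (added) of the same pipeline outside the tool wrapper.
# Assumptions: the standard transformers SpeechT5 API and the public checkpoints
# named below; running it needs network access, so it is left commented out:
# from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
# from datasets import load_dataset
# import torch
# processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
# model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
# vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
# inputs = processor(text="Hello world", return_tensors="pt")
# xvec = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
# speaker = torch.tensor(xvec[7305]["xvector"]).unsqueeze(0)
# speech = model.generate_speech(inputs["input_ids"], speaker, vocoder=vocoder)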
| 309
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class lowercase ( A__ ):
"""simple docstring"""
_a = 'layoutlmv3'
def __init__( self , UpperCamelCase_=50265 , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3072 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=2 , UpperCamelCase_=0.02 , UpperCamelCase_=1e-5 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=1024 , UpperCamelCase_=128 , UpperCamelCase_=128 , UpperCamelCase_=True , UpperCamelCase_=32 , UpperCamelCase_=128 , UpperCamelCase_=64 , UpperCamelCase_=256 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=224 , UpperCamelCase_=3 , UpperCamelCase_=16 , UpperCamelCase_=None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
vocab_size=UpperCamelCase_ , hidden_size=UpperCamelCase_ , num_hidden_layers=UpperCamelCase_ , num_attention_heads=UpperCamelCase_ , intermediate_size=UpperCamelCase_ , hidden_act=UpperCamelCase_ , hidden_dropout_prob=UpperCamelCase_ , attention_probs_dropout_prob=UpperCamelCase_ , max_position_embeddings=UpperCamelCase_ , type_vocab_size=UpperCamelCase_ , initializer_range=UpperCamelCase_ , layer_norm_eps=UpperCamelCase_ , pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Tuple = max_ad_position_embeddings
UpperCamelCase__ :List[Any] = coordinate_size
UpperCamelCase__ :Union[str, Any] = shape_size
UpperCamelCase__ :int = has_relative_attention_bias
UpperCamelCase__ :Any = rel_pos_bins
UpperCamelCase__ :Tuple = max_rel_pos
UpperCamelCase__ :Optional[Any] = has_spatial_attention_bias
UpperCamelCase__ :Union[str, Any] = rel_ad_pos_bins
UpperCamelCase__ :Union[str, Any] = max_rel_ad_pos
UpperCamelCase__ :Dict = text_embed
UpperCamelCase__ :Optional[Any] = visual_embed
UpperCamelCase__ :Dict = input_size
UpperCamelCase__ :List[Any] = num_channels
UpperCamelCase__ :Optional[int] = patch_size
UpperCamelCase__ :str = classifier_dropout
class lowercase ( A__ ):
"""simple docstring"""
_a = version.parse('1.12' )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return 1e-5
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return 12
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = -1 , UpperCamelCase_ = -1 , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = 3 , UpperCamelCase_ = 40 , UpperCamelCase_ = 40 , ):
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , UpperCamelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase__ :Optional[int] = compute_effective_axis_dimension(
UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase__ :Union[str, Any] = processor.tokenizer.num_special_tokens_to_add(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = compute_effective_axis_dimension(
UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase__ :Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCamelCase__ :str = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCamelCase__ :Optional[Any] = self._generate_dummy_images(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Any = dict(
processor(
UpperCamelCase_ , text=UpperCamelCase_ , boxes=UpperCamelCase_ , return_tensors=UpperCamelCase_ , ) )
return inputs
| 97
|
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _UpperCAmelCase ( ) -> Tuple:
_lowerCAmelCase : List[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
_lowerCAmelCase : int = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert("""RGB""" )
return image
def _UpperCAmelCase ( _lowerCamelCase : Any ) -> Dict:
_lowerCAmelCase : str = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict: the original checkpoint has no k bias,
        # so zeros fill the k slot of the fused qkv bias
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
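
# Quick shape check of the fusion above (the hidden size of 4 is made up for
# illustration): q and v biases of size h plus a zero k slot concatenate to a
# fused qkv bias of size 3 * h.
_q, _v = torch.ones(4), torch.full((4,), 2.0)
assert torch.cat((_q, torch.zeros_like(_v), _v)).shape == (12,)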
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # Copy/paste/tweak the original LAVIS weights to the Transformers design.
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 309
| 0
|
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "

HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]
def convert_command_factory(args: Namespace):
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")

            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []

            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 98
|
'''simple docstring'''
import argparse
import os
import re
PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str]="" , _lowerCamelCase : str=None , _lowerCamelCase : List[Any]=None ) -> str:
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Tuple = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(_lowerCamelCase ):
index += 1
_lowerCAmelCase : List[Any] = ["""\n""".join(lines[:index] )]
else:
_lowerCAmelCase : List[str] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase : Union[str, Any] = [lines[index]]
index += 1
while index < len(_lowerCamelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_lowerCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(_lowerCamelCase ) )
if index < len(_lowerCamelCase ) - 1:
_lowerCAmelCase : Union[str, Any] = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : Dict = []
else:
blocks.append("""\n""".join(_lowerCamelCase ) )
_lowerCAmelCase : Tuple = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_lowerCamelCase ) > 0:
blocks.append("""\n""".join(_lowerCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCamelCase ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    """Wraps a key function to lower-case its result and drop underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
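
# Quick demo of the ordering rule above (the names are made up): constants
# first, then classes, then functions, each sorted alphabetically with
# underscores ignored.
assert sort_objects(["my_func", "MyClass", "MY_CONST", "_helper"]) == [
    "MY_CONST",
    "MyClass",
    "_helper",
    "my_func",
]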
def sort_objects_in_import(import_statement):
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
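
# Minimal demo of the one-line case above (the import statement is made up):
# the bracketed object list gets reordered in place.
assert (
    sort_objects_in_import('_import_structure["models"] = ["zeta", "Alpha", "BETA"]')
    == '_import_structure["models"] = ["BETA", "Alpha", "zeta"]'
)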
def sort_imports(file, check_only=True):
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
| 309
| 0
|
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process with preemptive shortest job
    first (i.e. shortest remaining time first)."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time
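
# Worked example (the process set is made up for illustration): with arrival
# times [0, 1, 2] and burst times [3, 1, 2], process 1 preempts process 0 at
# t=1 and finishes at t=2, so the waiting times come out as [1, 0, 2].
assert calculate_waitingtime([0, 1, 2], [3, 1, 2], 3) == [1, 0, 2]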
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turn around time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting time and the average turn around time."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
| 99
|
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding="longest",
        return_tensors=None,
        truncation=True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
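
# Usage sketch (the checkpoint name is illustrative): both sub-tokenizers are
# loaded from the checkpoint's `question_encoder_tokenizer` and
# `generator_tokenizer` subfolders.
# tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
# batch = tokenizer(["who wrote hamlet?"], return_tensors="pt")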
| 309
| 0
|
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    """Construct a BigBird tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs=None,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids,
        skip_special_tokens=False,
        clean_up_tokenization_spaces=None,
        spaces_between_special_tokens=True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
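
# Usage sketch (requires a real SentencePiece model file; the path below is a
# placeholder): sequence pairs get the [CLS] A [SEP] B [SEP] layout.
# tokenizer = BigBirdTokenizer("spiece.model")
# ids = tokenizer.build_inputs_with_special_tokens([5, 6], [7])
# ids == [tokenizer.cls_token_id, 5, 6, tokenizer.sep_token_id, 7, tokenizer.sep_token_id]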
| 100
|
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """Sort a list of integers in place using pigeonhole sort."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
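
# Quick check of the in-place sort above (the sample list is arbitrary):
# duplicates land next to each other in their pigeonhole.
_sample = [5, 1, 3, 1]
pigeonhole_sort(_sample)
assert _sample == [1, 1, 3, 5]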
def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(n) for n in a))


if __name__ == "__main__":
    main()
| 309
| 0
|
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    """Check whether a graph (given as an adjacency list) is bipartite by 2-coloring it with DFS."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
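
# A triangle (odd cycle, made up for illustration) cannot be 2-colored, so the
# check correctly reports it as non-bipartite.
print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False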
| 101
|
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first `n` natural numbers (Project Euler problem 6)."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
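
# Sanity check against a brute-force computation (n=10 is the worked example
# from the Project Euler problem statement: 3025 - 385 = 2640).
assert solution(10) == sum(range(11)) ** 2 - sum(i * i for i in range(11)) == 2640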
if __name__ == "__main__":
print(F'{solution() = }')
| 309
| 0
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> PretrainedConfig:
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> PretrainedConfig:
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
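
# Usage sketch: composing a full config from the two sub-configs (all values
# left at their defaults here).
# config = Pix2StructConfig.from_text_vision_configs(Pix2StructTextConfig(), Pix2StructVisionConfig())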
| 102
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 309
| 0
|
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlight(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty list for both the story and the summary."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 103
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 309
| 0
|
'''simple docstring'''
def reverse_words(input_str: str) -> str:
    """
    Reverses words in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    >>> reverse_words("I     Love          Python")
    'Python Love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    """Apply the rectified linear unit element-wise: max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 309
| 0
|
"""simple docstring"""
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter in direct form; `order` is the filter order."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        """Set the a and b coefficients; a_0 defaults to 1.0 if omitted."""
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(msg)

        if len(b_coeffs) != self.order + 1:
            msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """Process a single input sample and return the filtered output."""
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
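
# Example usage (the coefficient values are made up for illustration): a
# first-order filter with a pole at 0.5, so the impulse response decays by
# that factor each step.
if __name__ == "__main__":
    filt = IIRFilter(1)
    filt.set_coefficients([1.0, -0.5], [0.5, 0.5])
    print([round(filt.process(x), 4) for x in (1.0, 0.0, 0.0)])  # [0.5, 0.75, 0.375]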
| 105
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)
class a_ (_a ):
def __init__( self , *snake_case_ , **snake_case_ ):
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" , snake_case_ , )
super().__init__(*snake_case_ , **snake_case_ )
| 309
| 0
|
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
__UpperCamelCase : Tuple = TypeVar('''T''')
class SCREAMING_SNAKE_CASE ( Generic[T] ):
"""simple docstring"""
lowercase__ = 42 # Cache store of keys
lowercase__ = 42 # References of the keys in cache
lowercase__ = 10 # Maximum capacity of cache
def __init__( self : Dict ,lowercase_ : int ):
lowerCAmelCase__ : str = deque()
lowerCAmelCase__ : Any = set()
if not n:
lowerCAmelCase__ : Optional[Any] = sys.maxsize
elif n < 0:
raise ValueError('''n should be an integer greater than 0.''' )
else:
lowerCAmelCase__ : int = n
def __lowerCAmelCase ( self : str ,lowercase_ : T ):
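        # LRU policy: on a miss, evict the least recently used key if at capacity; on a hit, move the key to the front.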
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
lowerCAmelCase__ : Any = self.dq_store.pop()
self.key_reference.remove(lowercase_ )
else:
self.dq_store.remove(lowercase_ )
self.dq_store.appendleft(lowercase_ )
self.key_reference.add(lowercase_ )
def __lowerCAmelCase ( self : int ):
for k in self.dq_store:
print(lowercase_ )
def __repr__( self : Tuple ):
return F'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 106
|
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class a_ (_a ):
__lowerCAmelCase : Dict = (DPMSolverSDEScheduler,)
__lowerCAmelCase : Dict = 1_0
def __UpperCamelCase ( self , **snake_case_ ):
_lowerCAmelCase : List[Any] = {
"""num_train_timesteps""": 1_1_0_0,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""noise_sampler_seed""": 0,
}
config.update(**snake_case_ )
return config
def __UpperCamelCase ( self ):
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def __UpperCamelCase ( self ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=snake_case_ , beta_end=snake_case_ )
def __UpperCamelCase ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=snake_case_ )
def __UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.scheduler_classes[0]
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : Any = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : Tuple = self.dummy_model()
_lowerCAmelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : Optional[Any] = sample.to(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Union[str, Any] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : Union[str, Any] = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = output.prev_sample
_lowerCAmelCase : List[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1E-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.scheduler_classes[0]
_lowerCAmelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
_lowerCAmelCase : Dict = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : int = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : int = sample.to(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : List[str] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : List[Any] = model(snake_case_ , snake_case_ )
_lowerCAmelCase : str = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : int = output.prev_sample
_lowerCAmelCase : str = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Optional[int] = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1E-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1E-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1E-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : str = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case_ )
_lowerCAmelCase : Tuple = self.dummy_model()
_lowerCAmelCase : Optional[int] = self.dummy_sample_deter.to(snake_case_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_lowerCAmelCase : str = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = output.prev_sample
_lowerCAmelCase : List[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1E-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = self.scheduler_classes[0]
_lowerCAmelCase : Optional[int] = self.get_scheduler_config()
_lowerCAmelCase : Tuple = scheduler_class(**snake_case_ , use_karras_sigmas=snake_case_ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case_ )
_lowerCAmelCase : List[Any] = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter.to(snake_case_ ) * scheduler.init_noise_sigma
_lowerCAmelCase : Optional[int] = sample.to(snake_case_ )
for t in scheduler.timesteps:
_lowerCAmelCase : List[str] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : int = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[int] = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : str = output.prev_sample
_lowerCAmelCase : Optional[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
| 309
| 0
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : Any = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase : Union[str, Any] = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase : Any = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase : Union[str, Any] = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
__lowerCAmelCase : Tuple = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
__lowerCAmelCase : Union[str, Any] = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
__lowerCAmelCase : Union[str, Any] = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
__lowerCAmelCase : Tuple = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
__lowerCAmelCase : str = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Dict = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Optional[int] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : List[str] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ : Dict = DPRContextEncoderTokenizer
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : str = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Dict = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ : Optional[int] = DPRQuestionEncoderTokenizer
__lowerCAmelCase : List[str] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
__lowerCAmelCase : Optional[int] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
__lowerCAmelCase : Optional[Any] = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(_UpperCamelCase )
class snake_case__ :
"""simple docstring"""
def __call__( self : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Union[bool, str] = False , __lowerCamelCase : Union[bool, str] = False , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : Optional[bool] = None , **__lowerCamelCase : Optional[int] , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , )
elif titles is None or texts is None:
a = titles if texts is None else texts
return super().__call__(
__lowerCamelCase , __lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , )
a = titles if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [titles]
a = texts if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [texts]
a = len(__lowerCamelCase )
a = questions if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [questions] * n_passages
assert len(__lowerCamelCase ) == len(
__lowerCamelCase ), f"""There should be as many titles than texts but got {len(__lowerCamelCase )} titles and {len(__lowerCamelCase )} texts."""
a = super().__call__(__lowerCamelCase , __lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase )["input_ids"]
a = super().__call__(__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase )["input_ids"]
a = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__lowerCamelCase , __lowerCamelCase )
]
}
if return_attention_mask is not False:
a = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
a = attention_mask
return self.pad(__lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase )
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : BatchEncoding , __lowerCamelCase : DPRReaderOutput , __lowerCamelCase : int = 16 , __lowerCamelCase : int = 64 , __lowerCamelCase : int = 4 , ) -> List[DPRSpanPrediction]:
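        # Rank passages by their relevance logit, then extract the best non-overlapping answer spans from each passage.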
a = reader_input["input_ids"]
a , a , a = reader_output[:3]
a = len(__lowerCamelCase )
a = sorted(range(__lowerCamelCase ) , reverse=__lowerCamelCase , key=relevance_logits.__getitem__ )
a = []
for doc_id in sorted_docs:
a = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
a = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
a = sequence_ids.index(self.pad_token_id )
else:
a = len(__lowerCamelCase )
a = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__lowerCamelCase , top_spans=__lowerCamelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__lowerCamelCase , start_index=__lowerCamelCase , end_index=__lowerCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(__lowerCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : List[int] , __lowerCamelCase : int , __lowerCamelCase : int , ) -> List[DPRSpanPrediction]:
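        # Score each candidate span as start_logit + end_logit and keep the top non-overlapping spans.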
a = []
for start_index, start_score in enumerate(__lowerCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        a = sorted(__lowerCamelCase , key=lambda x : x[1] , reverse=__lowerCamelCase )
a = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]"""
a = end_index - start_index + 1
assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(__lowerCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCamelCase )
class snake_case__ (_UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[str] = READER_PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Union[str, Any] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : int = READER_PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ : int = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : Optional[Any] = DPRReaderTokenizer
| 107
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"""vocab_file""": """vocab.txt"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
UpperCamelCase_ = {
"""YituTech/conv-bert-base""": 5_12,
"""YituTech/conv-bert-medium-small""": 5_12,
"""YituTech/conv-bert-small""": 5_12,
}
UpperCamelCase_ = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class a_ (_a ):
__lowerCAmelCase : Any = VOCAB_FILES_NAMES
__lowerCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Optional[int] = ConvBertTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_=True , snake_case_=None , **snake_case_ , ):
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
_lowerCAmelCase : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , snake_case_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , snake_case_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , snake_case_ ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(snake_case_ , normalizer_state.pop("""type""" ) )
_lowerCAmelCase : List[str] = do_lower_case
_lowerCAmelCase : str = strip_accents
_lowerCAmelCase : List[Any] = tokenize_chinese_chars
_lowerCAmelCase : List[Any] = normalizer_class(**snake_case_ )
_lowerCAmelCase : str = do_lower_case
def __UpperCamelCase ( self , snake_case_ , snake_case_=None ):
_lowerCAmelCase : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Optional[Any] = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Any = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
| 309
| 0
|
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
lowerCAmelCase : int = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" )
lowerCAmelCase : Optional[Any] = chkpt["model"]
# We have the base model one level deeper than the original XLM repository
lowerCAmelCase : int = {}
for k, v in state_dict.items():
if "pred_layer" in k:
lowerCAmelCase : Union[str, Any] = v
else:
lowerCAmelCase : int = v
lowerCAmelCase : List[Any] = chkpt["params"]
lowerCAmelCase : Tuple = {n: v for n, v in config.items() if not isinstance(SCREAMING_SNAKE_CASE , (torch.FloatTensor, numpy.ndarray) )}
lowerCAmelCase : List[str] = chkpt["dico_word2id"]
lowerCAmelCase : int = {s + "</w>" if s.find("@@" ) == -1 and i > 1_3 else s.replace("@@" , "" ): i for s, i in vocab.items()}
# Save pytorch-model
lowerCAmelCase : List[Any] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
lowerCAmelCase : List[Any] = pytorch_dump_folder_path + "/" + CONFIG_NAME
lowerCAmelCase : Dict = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE , indent=2 ) + "\n" )
print(f"""Save vocab file to {pytorch_config_dump_path}""" )
with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE , indent=2 ) + "\n" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 108
|
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class a_ :
def __init__( self ):
_lowerCAmelCase : Any = """"""
_lowerCAmelCase : List[Any] = """"""
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : int = 0
_lowerCAmelCase : str = 2_5_6
_lowerCAmelCase : List[str] = 0
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Dict = 0
def __UpperCamelCase ( self , snake_case_ ):
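        # Accumulate the normalized histogram into a CDF (sk) and map each gray level to (L - 1) * CDF, as in histogram equalization.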
_lowerCAmelCase : str = cva.imread(snake_case_ , 0 )
_lowerCAmelCase : List[str] = copy.deepcopy(self.img )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="""x""" )
_lowerCAmelCase : List[Any] = np.sum(snake_case_ )
for i in range(len(snake_case_ ) ):
_lowerCAmelCase : Optional[int] = x[i] / self.k
self.sk += prk
_lowerCAmelCase : Any = (self.L - 1) * self.sk
if self.rem != 0:
_lowerCAmelCase : Dict = int(last % last )
_lowerCAmelCase : str = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(snake_case_ )
_lowerCAmelCase : str = int(np.ma.count(self.img ) / self.img[1].size )
_lowerCAmelCase : Union[str, Any] = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
_lowerCAmelCase : Union[str, Any] = self.img[j][i]
if num != self.last_list[num]:
_lowerCAmelCase : List[str] = self.last_list[num]
cva.imwrite("""output_data/output.jpg""" , self.img )
def __UpperCamelCase ( self ):
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def __UpperCamelCase ( self ):
cva.imshow("""Output-Image""" , self.img )
cva.imshow("""Input-Image""" , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
    UpperCamelCase_ = os.path.join(os.path.dirname(__file__), """image_data/input.jpg""")
UpperCamelCase_ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 309
| 0
|
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
A: Tuple = logging.get_logger(__name__)
def _snake_case ( UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] ):
UpperCAmelCase : int = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm1.weight", F"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm1.bias", F"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.weight", F"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.bias", F"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm2.weight", F"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm2.bias", F"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.weight", F"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.bias", F"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc2.weight", F"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.mlp.fc2.bias", F"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def _snake_case ( UpperCamelCase : Dict , UpperCamelCase : Optional[Any] ):
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
UpperCAmelCase : Optional[int] = state_dict.pop(F"encoder.deit.blocks.{i}.attn.qkv.weight" )
UpperCAmelCase : str = in_proj_weight[
: encoder_config.hidden_size, :
]
UpperCAmelCase : Dict = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
UpperCAmelCase : Optional[int] = in_proj_weight[
-encoder_config.hidden_size :, :
]
def _snake_case ( UpperCamelCase : str , UpperCamelCase : Dict , UpperCamelCase : int ):
UpperCAmelCase : Optional[int] = dct.pop(UpperCamelCase )
UpperCAmelCase : str = val
def _snake_case ( UpperCamelCase : Any ):
if "handwritten" in checkpoint_url:
UpperCAmelCase : int = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
UpperCAmelCase : Tuple = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
UpperCAmelCase : int = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def _snake_case ( UpperCamelCase : Dict , UpperCamelCase : int ):
UpperCAmelCase : Dict = ViTConfig(image_size=384 , qkv_bias=UpperCamelCase )
UpperCAmelCase : List[Any] = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
UpperCAmelCase : List[Any] = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
UpperCAmelCase : Tuple = 1024
UpperCAmelCase : Any = 4096
UpperCAmelCase : List[str] = 24
UpperCAmelCase : str = 16
UpperCAmelCase : Optional[Any] = 1024
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
UpperCAmelCase : Optional[Any] = False
UpperCAmelCase : int = """relu"""
UpperCAmelCase : Dict = 1024
UpperCAmelCase : List[str] = True
UpperCAmelCase : List[str] = False
UpperCAmelCase : Any = False
# load HuggingFace model
UpperCAmelCase : Any = ViTModel(UpperCamelCase , add_pooling_layer=UpperCamelCase )
UpperCAmelCase : Any = TrOCRForCausalLM(UpperCamelCase )
UpperCAmelCase : Dict = VisionEncoderDecoderModel(encoder=UpperCamelCase , decoder=UpperCamelCase )
model.eval()
# load state_dict of original model, rename some keys
UpperCAmelCase : str = torch.hub.load_state_dict_from_url(UpperCamelCase , map_location="""cpu""" , check_hash=UpperCamelCase )["""model"""]
UpperCAmelCase : str = create_rename_keys(UpperCamelCase , UpperCamelCase )
for src, dest in rename_keys:
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
read_in_q_k_v(UpperCamelCase , UpperCamelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
UpperCAmelCase : Any = state_dict.pop(UpperCamelCase )
if key.startswith("""decoder""" ) and "output_projection" not in key:
UpperCAmelCase : Union[str, Any] = val
else:
UpperCAmelCase : List[str] = val
# load state dict
model.load_state_dict(UpperCamelCase )
# Check outputs on an image
UpperCAmelCase : Optional[Any] = ViTImageProcessor(size=encoder_config.image_size )
UpperCAmelCase : List[str] = RobertaTokenizer.from_pretrained("""roberta-large""" )
UpperCAmelCase : Union[str, Any] = TrOCRProcessor(UpperCamelCase , UpperCamelCase )
UpperCAmelCase : Tuple = processor(images=prepare_img(UpperCamelCase ) , return_tensors="""pt""" ).pixel_values
# verify logits
UpperCAmelCase : Dict = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
UpperCAmelCase : Dict = model(pixel_values=UpperCamelCase , decoder_input_ids=UpperCamelCase )
UpperCAmelCase : List[Any] = outputs.logits
UpperCAmelCase : Optional[int] = torch.Size([1, 1, 50265] )
if "trocr-base-handwritten" in checkpoint_url:
UpperCAmelCase : Optional[Any] = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
UpperCAmelCase : Optional[int] = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
UpperCAmelCase : Tuple = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
UpperCAmelCase : Tuple = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , UpperCamelCase , atol=1e-3 ), "First elements of logits not as expected"
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase )
print(F"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
A: List[str] = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
A: List[str] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 109
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase_ = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 0
|
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class A ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase=2 , _UpperCAmelCase=5_6 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=9_9 , _UpperCAmelCase=3_2 , _UpperCAmelCase=2 , _UpperCAmelCase=2 , _UpperCAmelCase=7 , _UpperCAmelCase="gelu_new" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=5_1_2 , _UpperCAmelCase=1_6 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=4 , _UpperCAmelCase="block_sparse" , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=2 , _UpperCAmelCase=3 , ) -> Optional[Any]:
__UpperCamelCase : int = parent
__UpperCamelCase : Tuple = batch_size
__UpperCamelCase : List[Any] = seq_length
__UpperCamelCase : int = is_training
__UpperCamelCase : Optional[Any] = use_attention_mask
__UpperCamelCase : Tuple = use_token_type_ids
__UpperCamelCase : List[Any] = use_labels
__UpperCamelCase : Dict = vocab_size
__UpperCamelCase : Optional[int] = hidden_size
__UpperCamelCase : List[Any] = num_hidden_layers
__UpperCamelCase : Dict = num_attention_heads
__UpperCamelCase : Tuple = intermediate_size
__UpperCamelCase : List[Any] = hidden_act
__UpperCamelCase : List[Any] = hidden_dropout_prob
__UpperCamelCase : int = attention_probs_dropout_prob
__UpperCamelCase : Tuple = max_position_embeddings
__UpperCamelCase : Optional[int] = type_vocab_size
__UpperCamelCase : Optional[int] = type_sequence_label_size
__UpperCamelCase : List[str] = initializer_range
__UpperCamelCase : str = num_choices
__UpperCamelCase : Tuple = rescale_embeddings
__UpperCamelCase : Tuple = attention_type
__UpperCamelCase : List[str] = use_bias
__UpperCamelCase : str = block_size
__UpperCamelCase : List[str] = num_random_blocks
def a_ (self ) -> Optional[Any]:
__UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase : List[str] = None
if self.use_attention_mask:
__UpperCamelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase : List[str] = None
if self.use_token_type_ids:
__UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase : Any = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def a_ (self ) -> List[Any]:
__UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
__UpperCamelCase : List[str] = config_and_inputs
__UpperCamelCase : Tuple = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_flax
class A ( _a , unittest.TestCase ):
'''simple docstring'''
A = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
A = False
A = False
def a_ (self ) -> List[Any]:
__UpperCamelCase : Union[str, Any] = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def a_ (self ) -> Optional[int]:
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def a_ (self ) -> Optional[Any]:
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def a_ (self ) -> Dict:
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def a_ (self ) -> int:
super().test_hidden_states_output()
@slow
def a_ (self ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
__UpperCamelCase : Any = model_class_name.from_pretrained("google/bigbird-roberta-base" )
self.assertIsNotNone(snake_case_ )
def a_ (self ) -> Tuple:
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def a_ (self ) -> Optional[Any]:
__UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase : str = self._prepare_for_class(snake_case_ , snake_case_ )
__UpperCamelCase : Tuple = model_class(snake_case_ )
@jax.jit
def model_jitted(_UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ):
return model(input_ids=snake_case_ , attention_mask=snake_case_ , **snake_case_ )
with self.subTest("JIT Enabled" ):
__UpperCamelCase : Optional[Any] = model_jitted(**snake_case_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__UpperCamelCase : Any = model_jitted(**snake_case_ ).to_tuple()
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for jitted_output, output in zip(snake_case_ , snake_case_ ):
self.assertEqual(jitted_output.shape , output.shape )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=1E-5 , _UpperCAmelCase="outputs" , _UpperCAmelCase=None ) -> int:
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was done to return `attention_probs` (yet to be verified).
if name.startswith("outputs.attentions" ):
return
else:
super().check_pt_flax_outputs(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
| 298
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
UpperCamelCase_ = 0
UpperCamelCase_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
UpperCamelCase_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
UpperCamelCase_ = tuple[int, int]
class a_ :
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_lowerCAmelCase : Optional[int] = pos_x
_lowerCAmelCase : List[str] = pos_y
_lowerCAmelCase : Tuple = (pos_y, pos_x)
_lowerCAmelCase : List[Any] = goal_x
_lowerCAmelCase : int = goal_y
_lowerCAmelCase : Union[str, Any] = g_cost
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : List[Any] = self.calculate_heuristic()
_lowerCAmelCase : Optional[int] = self.g_cost + self.h_cost
def __UpperCamelCase ( self ):
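        # Manhattan distance (|dx| + |dy|) when HEURISTIC == 1, otherwise Euclidean distance.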
_lowerCAmelCase : List[str] = self.pos_x - self.goal_x
_lowerCAmelCase : Optional[int] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(snake_case_ ) + abs(snake_case_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , snake_case_ ):
return self.f_cost < other.f_cost
class a_ :
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : Optional[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , snake_case_ )
_lowerCAmelCase : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , snake_case_ )
_lowerCAmelCase : List[str] = [self.start]
_lowerCAmelCase : list[Node] = []
_lowerCAmelCase : List[str] = False
def __UpperCamelCase ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_lowerCAmelCase : Optional[int] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(snake_case_ )
self.closed_nodes.append(snake_case_ )
_lowerCAmelCase : Optional[int] = self.get_successors(snake_case_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(snake_case_ )
else:
# retrieve the best current path
_lowerCAmelCase : Optional[Any] = self.open_nodes.pop(self.open_nodes.index(snake_case_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(snake_case_ )
else:
self.open_nodes.append(snake_case_ )
return [self.start.pos]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Union[str, Any] = []
for action in delta:
_lowerCAmelCase : Union[str, Any] = parent.pos_x + action[1]
_lowerCAmelCase : Dict = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(snake_case_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
snake_case_ , snake_case_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , snake_case_ , ) )
return successors
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[Any] = node
_lowerCAmelCase : Optional[Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_lowerCAmelCase : Optional[int] = current_node.parent
path.reverse()
return path
class a_ :
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[str] = AStar(snake_case_ , snake_case_ )
_lowerCAmelCase : int = AStar(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[int] = False
def __UpperCamelCase ( self ):
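        # Advance the forward and backward searches in lockstep; a path is found when their current nodes meet.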
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
_lowerCAmelCase : Tuple = self.fwd_astar.open_nodes.pop(0 )
_lowerCAmelCase : Optional[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
snake_case_ , snake_case_ )
self.fwd_astar.closed_nodes.append(snake_case_ )
self.bwd_astar.closed_nodes.append(snake_case_ )
_lowerCAmelCase : List[str] = current_bwd_node
_lowerCAmelCase : Dict = current_fwd_node
_lowerCAmelCase : Any = {
self.fwd_astar: self.fwd_astar.get_successors(snake_case_ ),
self.bwd_astar: self.bwd_astar.get_successors(snake_case_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(snake_case_ )
else:
# retrieve the best current path
_lowerCAmelCase : List[Any] = astar.open_nodes.pop(
astar.open_nodes.index(snake_case_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(snake_case_ )
else:
astar.open_nodes.append(snake_case_ )
return [self.fwd_astar.start.pos]
def __UpperCamelCase ( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : int = self.fwd_astar.retrace_path(snake_case_ )
_lowerCAmelCase : Optional[Any] = self.bwd_astar.retrace_path(snake_case_ )
bwd_path.pop()
bwd_path.reverse()
_lowerCAmelCase : Dict = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
UpperCamelCase_ = (0, 0)
UpperCamelCase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
UpperCamelCase_ = time.time()
UpperCamelCase_ = AStar(init, goal)
UpperCamelCase_ = a_star.search()
UpperCamelCase_ = time.time() - start_time
print(F'AStar execution time = {end_time:f} seconds')
UpperCamelCase_ = time.time()
    UpperCamelCase_ = BidirectionalAStar(init, goal)
    UpperCamelCase_ = bd_a_star.search()
    UpperCamelCase_ = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
| 309
| 0
|
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
a_ = logging.get_logger(__name__)
# General docstring
a_ = 'MobileNetV1Config'
# Base docstring
a_ = 'google/mobilenet_v1_1.0_224'
a_ = [1, 1_024, 7, 7]
# Image classification docstring
a_ = 'google/mobilenet_v1_1.0_224'
a_ = 'tabby, tabby cat'
a_ = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def __lowercase ( lowerCamelCase : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : Optional[int]=None ):
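    # Build a mapping from TensorFlow checkpoint variable names to the corresponding PyTorch parameters.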
UpperCamelCase_ : Optional[int] = {}
if isinstance(_lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_ : Union[str, Any] = model.mobilenet_va
else:
UpperCamelCase_ : Tuple = model
UpperCamelCase_ : int = """MobilenetV1/Conv2d_0/"""
UpperCamelCase_ : int = backbone.conv_stem.convolution.weight
UpperCamelCase_ : int = backbone.conv_stem.normalization.bias
UpperCamelCase_ : Tuple = backbone.conv_stem.normalization.weight
UpperCamelCase_ : Any = backbone.conv_stem.normalization.running_mean
UpperCamelCase_ : List[str] = backbone.conv_stem.normalization.running_var
for i in range(13 ):
UpperCamelCase_ : Any = i + 1
UpperCamelCase_ : str = i * 2
UpperCamelCase_ : Optional[Any] = backbone.layer[pt_index]
UpperCamelCase_ : Union[str, Any] = F"MobilenetV1/Conv2d_{tf_index}_depthwise/"
UpperCamelCase_ : Dict = pointer.convolution.weight
UpperCamelCase_ : int = pointer.normalization.bias
UpperCamelCase_ : Optional[int] = pointer.normalization.weight
UpperCamelCase_ : Tuple = pointer.normalization.running_mean
UpperCamelCase_ : Any = pointer.normalization.running_var
UpperCamelCase_ : int = backbone.layer[pt_index + 1]
UpperCamelCase_ : Dict = F"MobilenetV1/Conv2d_{tf_index}_pointwise/"
UpperCamelCase_ : List[Any] = pointer.convolution.weight
UpperCamelCase_ : str = pointer.normalization.bias
UpperCamelCase_ : Optional[Any] = pointer.normalization.weight
UpperCamelCase_ : Tuple = pointer.normalization.running_mean
UpperCamelCase_ : Tuple = pointer.normalization.running_var
if isinstance(_lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_ : List[str] = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
UpperCamelCase_ : Optional[Any] = model.classifier.weight
UpperCamelCase_ : int = model.classifier.bias
return tf_to_pt_map
def __lowercase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : List[str] ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
        logger.error(
            'Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see '
            'https://www.tensorflow.org/install/ for installation instructions.' )
raise
# Load weights from TF model
UpperCamelCase_ : Optional[int] = tf.train.list_variables(_lowerCamelCase )
UpperCamelCase_ : List[str] = {}
for name, shape in init_vars:
logger.info(F"Loading TF weight {name} with shape {shape}" )
UpperCamelCase_ : Optional[Any] = tf.train.load_variable(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_ : Optional[int] = array
# Build TF to PyTorch weights loading map
UpperCamelCase_ : int = _build_tf_to_pytorch_map(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
for name, pointer in tf_to_pt_map.items():
logger.info(F"Importing {name}" )
if name not in tf_weights:
logger.info(F"{name} not in tf pre-trained weights, skipping" )
continue
UpperCamelCase_ : Optional[Any] = tf_weights[name]
if "depthwise_weights" in name:
logger.info('Transposing depthwise' )
UpperCamelCase_ : List[str] = np.transpose(_lowerCamelCase , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('Transposing' )
if len(pointer.shape ) == 2: # copying into linear layer
UpperCamelCase_ : Union[str, Any] = array.squeeze().transpose()
else:
UpperCamelCase_ : Union[str, Any] = np.transpose(_lowerCamelCase , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" )
logger.info(F"Initialize PyTorch weight {name} {array.shape}" )
UpperCamelCase_ : Union[str, Any] = torch.from_numpy(_lowerCamelCase )
tf_weights.pop(_lowerCamelCase , _lowerCamelCase )
tf_weights.pop(name + '/RMSProp' , _lowerCamelCase )
tf_weights.pop(name + '/RMSProp_1' , _lowerCamelCase )
tf_weights.pop(name + '/ExponentialMovingAverage' , _lowerCamelCase )
logger.info(F"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" )
return model
def apply_tf_padding( features : torch.Tensor , conv_layer : nn.Conv2d ) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height , 0 )
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height) , 0 )

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width , 0 )
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width) , 0 )

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , 'constant' , 0.0 )
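# A quick sanity sketch (my own example, not from the original file): with TF "SAME"
# padding, a 224x224 input and a 3x3 kernel at stride 2 give
# pad_along = max(3 - 2, 0) = 1, split as (left, right, top, bottom) = (0, 1, 0, 1),
# so the padded map is 225x225 and the convolution output is ceil(224 / 2) = 112.
#
#     conv = nn.Conv2d(3, 32, kernel_size=3, stride=2)
#     apply_tf_padding(torch.randn(1, 3, 224, 224), conv).shape  # torch.Size([1, 3, 225, 225])
#     conv(apply_tf_padding(torch.randn(1, 3, 224, 224), conv)).shape  # torch.Size([1, 32, 112, 112])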
class MobileNetVaConvLayer( nn.Module ):
    def __init__( self , config , in_channels : int , out_channels : int , kernel_size : int , stride : int = 1 , groups : int = 1 , bias : bool = False , use_normalization : bool = True , use_activation : bool = True , ) -> None:
        """Convolution + optional batch norm + optional activation, as one block."""
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups." )
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups." )

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2 )

        self.convolution = nn.Conv2d(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , stride=stride , padding=padding , groups=groups , bias=bias , padding_mode='zeros' , )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels , eps=config.layer_norm_eps , momentum=0.9997 , affine=True , track_running_stats=True , )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation , str ):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act , str ):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward( self , features : torch.Tensor ) -> torch.Tensor:
        """Apply (optional TF padding ->) convolution -> normalization -> activation."""
        if self.config.tf_padding:
            features = apply_tf_padding(features , self.convolution )
        features = self.convolution(features )
        if self.normalization is not None:
            features = self.normalization(features )
        if self.activation is not None:
            features = self.activation(features )
        return features
class MobileNetVaPreTrainedModel( PreTrainedModel ):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = """mobilenet_v1"""
    main_input_name = """pixel_values"""
    supports_gradient_checkpointing = False

    def _init_weights( self , module ) -> None:
        """Initialize the weights."""
        if isinstance(module , (nn.Linear, nn.Conv2d) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module , nn.BatchNorm2d ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
MOBILENET_V1_START_DOCSTRING = r'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
MOBILENET_V1_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`MobileNetV1ImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    'The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.' , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaModel( MobileNetVaPreTrainedModel ):
    def __init__( self , config , add_pooling_layer : bool = True ):
        super().__init__(config )
        self.config = config

        depth = 3_2
        out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )

        self.conv_stem = MobileNetVaConvLayer(
            config , in_channels=config.num_channels , out_channels=out_channels , kernel_size=3 , stride=2 , )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(1_3 ):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )

            # depthwise 3x3 followed by pointwise 1x1, the classic MobileNetV1 block
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=in_channels , kernel_size=3 , stride=strides[i] , groups=in_channels , ) )

            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=out_channels , kernel_size=1 , ) )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1) ) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads( self , heads_to_prune ):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values : Optional[torch.Tensor] = None , output_hidden_states : Optional[bool] = None , return_dict : Optional[bool] = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError('You have to specify pixel_values' )

        hidden_states = self.conv_stem(pixel_values )

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer ):
            hidden_states = layer_module(hidden_states )

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state ) , start_dim=1 )
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=all_hidden_states , )
@add_start_docstrings(
    '\n    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ' , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaForImageClassification( MobileNetVaPreTrainedModel ):
    def __init__( self , config ) -> None:
        super().__init__(config )

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config )

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob , inplace=True )
        self.classifier = nn.Linear(last_hidden_size , config.num_labels ) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values : Optional[torch.Tensor] = None , output_hidden_states : Optional[bool] = None , labels : Optional[torch.Tensor] = None , return_dict : Optional[bool] = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output ) )

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = """regression"""
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = """single_label_classification"""
                else:
                    self.config.problem_type = """multi_label_classification"""

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , )
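# A minimal usage sketch (added for illustration, not part of the original file; in the
# released transformers package the class is `MobileNetV1ForImageClassification`, here it
# keeps this file's `Va` naming):
#
#     config = MobileNetVaConfig()
#     model = MobileNetVaForImageClassification(config).eval()
#     pixel_values = torch.randn(1, 3, 224, 224)  # stand-in for a processed image
#     with torch.no_grad():
#         logits = model(pixel_values).logits  # shape (1, config.num_labels)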
| 175
|
'''simple docstring'''
def find_minimum_change( denominations : list[int] , value : str ) -> list[int]:
    total_value = int(value )

    # Initialize Result
    answer = []

    # Traverse through all denomination
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append the "answers" array

    return answer
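# Worked example (added for clarity, not in the original): greedily making change
# for 987 with the default Indian denominations takes the largest coin that fits
# at each step:
#     find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
#     # -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]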
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = """0"""

    if (
        input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
        == "y"
    ):
        n = int(input("""Enter the number of denominations you want to add: """).strip())

        for i in range(0, n):
            denominations.append(int(input(F'Denomination {i}: ').strip()))
        value = input("""Enter the change you want to make in Indian Currency: """).strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("""Enter the change you want to make: """).strip()

    if int(value) == 0 or int(value) < 0:
        print("""The total value cannot be zero or negative.""")
    else:
        print(F'Following is minimal change for {value}: ')
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=""" """)
| 309
| 0
|
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer( CLIPTokenizer ):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        self.token_map = {}

    def try_adding_tokens( self , placeholder_token , *args , **kwargs ):
        num_added_tokens = super().add_tokens(placeholder_token , *args , **kwargs )
        if num_added_tokens == 0:
            raise ValueError(
                f'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
                ''' `placeholder_token` that is not already in the tokenizer.''' )

    def add_placeholder_tokens( self , placeholder_token , *args , num_vec_per_token=1 , **kwargs ):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token , *args , **kwargs )
            output.append(placeholder_token )
        else:
            output = []
            for i in range(num_vec_per_token ):
                ith_token = placeholder_token + f'''_{i}'''
                self.try_adding_tokens(ith_token , *args , **kwargs )
                output.append(ith_token )
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f'''The tokenizer already has placeholder token {token} that can get confused with'''
                    f''' {placeholder_token}keep placeholder tokens independent''' )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text( self , text , vector_shuffle=False , prop_tokens_to_load=1.0 ):
        if isinstance(text , list ):
            output = []
            for i in range(len(text ) ):
                output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=vector_shuffle ) )
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens ) * prop_tokens_to_load )]
                if vector_shuffle:
                    tokens = copy.copy(tokens )
                    random.shuffle(tokens )
                text = text.replace(placeholder_token , ''' '''.join(tokens ) )
        return text

    def __call__( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )

    def encode( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
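# A short usage sketch (my example, not from the original file; assumes a standard
# CLIP checkpoint with the usual tokenizer files is reachable):
#
#     tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#     tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#     ids = tokenizer("a photo of <cat-toy>")  # "<cat-toy>" expands to <cat-toy>_0 ... <cat-toy>_3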
| 348
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_encoder_decoder"""] = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_encoder_decoder"""] = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_encoder_decoder"""] = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
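# How the lazy pattern resolves (illustrative comment, not in the original file):
# importing the package only registers _import_structure; the first attribute access,
# e.g. `transformers.models.encoder_decoder.EncoderDecoderModel`, makes _LazyModule
# import .modeling_encoder_decoder on demand and cache the symbol.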
| 309
| 0
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson( function : str , starting_point : complex , variable : str = "x" , precision : float = 1_0**-1_0 , multiplicity : int = 1 , ):
    a__ = symbols(variable )
    a__ = lambdify(a__ , function )
    a__ , func = a__ , a__
    diff_function = lambdify(symbols(variable ) , diff(function , symbols(variable ) ) )

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError('Could not find root' ) from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess

        prev_guess = next_guess
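# The update applied above is the multiplicity-aware Newton step (comment added for
# clarity, not in the original):  x_{n+1} = x_n - m * f(x_n) / f'(x_n).
# Hand-checked for f(x) = x**2 - 4 starting at 3 with m = 1:
#     3 -> 3 - 5/6 = 2.1666... -> 2.0064... -> 2.00001... -> 2.0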
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 240
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase ):
    def setUp( self ):
        self.checkpoint = """laion/clap-htsat-unfused"""
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer( self , **kwargs ):
        return RobertaTokenizer.from_pretrained(self.checkpoint , **kwargs )

    def get_feature_extractor( self , **kwargs ):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )

    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )

    def test_save_load_pretrained_default( self ):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )

        processor.save_pretrained(self.tmpdirname )
        processor = ClapProcessor.from_pretrained(self.tmpdirname )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )

        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )

    def test_save_load_pretrained_additional_features( self ):
        processor = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
        processor.save_pretrained(self.tmpdirname )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False , padding_value=1.0 )

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=False , padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )

        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )

    def test_feature_extractor( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )

        raw_speech = floats_list((3, 1_0_0_0) )

        input_feat_extract = feature_extractor(raw_speech , return_tensors="""np""" )
        input_processor = processor(audios=raw_speech , return_tensors="""np""" )

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def test_tokenizer( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )

        input_str = """This is a test string"""
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def test_tokenizer_decode( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )

        self.assertListEqual(decoded_processor , decoded_tok )

    def test_model_input_names( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )

        self.assertListEqual(
            processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 309
| 0
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path( pred_path, tgt_path, save_path=None, **kwargs ):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs )
    if save_path is not None:
        save_json(metrics, save_path, indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
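# Illustrative invocation (file names are placeholders; fire exposes the function's
# parameters as CLI flags):
#     python rouge_cli.py pred_summaries.txt ref_summaries.txt --save_path=rouge.json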
| 26
|
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = """▁"""

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/m2m100_418M""": 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class M2M100Tokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["""input_ids""", """attention_mask"""]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__( self , vocab_file , spm_file , src_lang=None , tgt_lang=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , language_codes="m2m100" , sp_model_kwargs: Optional[Dict[str, Any]] = None , num_madeup_words=8 , **kwargs , ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("""additional_special_tokens""" , [] )
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code )
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code ) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang , tgt_lang=tgt_lang , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , unk_token=unk_token , pad_token=pad_token , language_codes=language_codes , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=num_madeup_words , **kwargs , )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )

        self.encoder_size = len(self.encoder )

        self.lang_token_to_id = {
            self.get_lang_token(lang_code ): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else """en"""
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang )
        self.set_src_lang_special_tokens(self._src_lang )

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size( self ):
        return len(self.encoder ) + len(self.lang_token_to_id )

    @property
    def src_lang( self ):
        return self._src_lang

    @src_lang.setter
    def src_lang( self , new_src_lang ):
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def _tokenize( self , text ):
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id( self , token ):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token , self.encoder[self.unk_token] )

    def _convert_id_to_token( self , index ):
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index , self.unk_token )

    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()

    def get_special_tokens_mask( self , token_ids_a , token_ids_a_pair = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_a_pair , already_has_special_tokens=True )

        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_a_pair is None:
            return prefix_ones + ([0] * len(token_ids_a )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_a )) + ([0] * len(token_ids_a_pair )) + suffix_ones

    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_a_pair = None ):
        if token_ids_a_pair is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a_pair + self.suffix_tokens

    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__( self , d ):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        save_dir = Path(save_directory )
        if not save_dir.is_dir():
            raise OSError(f'{save_directory} should be a directory' )
        vocab_save_path = save_dir / (
            (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
        )
        spm_save_path = save_dir / (
            (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
        )

        save_json(self.encoder , vocab_save_path )

        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )

        return (str(vocab_save_path ), str(spm_save_path ))

    def prepare_seqaseq_batch( self , src_texts , src_lang = "en" , tgt_texts = None , tgt_lang = "ro" , **kwargs , ):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seqaseq_batch(src_texts , tgt_texts , **kwargs )

    def _build_translation_inputs( self , raw_inputs , src_lang , tgt_lang , **extra_kwargs ):
        """Used by the translation pipeline, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , **extra_kwargs )
        tgt_lang_id = self.get_lang_id(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode( self ):
        self.set_src_lang_special_tokens(self.src_lang )

    def _switch_to_target_mode( self ):
        self.set_tgt_lang_special_tokens(self.tgt_lang )

    def set_src_lang_special_tokens( self , src_lang ):
        lang_token = self.get_lang_token(src_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens( self , tgt_lang ):
        lang_token = self.get_lang_token(tgt_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token( self , lang ):
        return self.lang_code_to_token[lang]

    def get_lang_id( self , lang ):
        lang_token = self.get_lang_token(lang )
        return self.lang_token_to_id[lang_token]
def load_spm( path : str , sp_model_kwargs : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm


def load_json( path : str ) -> Union[Dict, List]:
    with open(path , """r""" ) as f:
        return json.load(f )


def save_json( data , path : str ) -> None:
    with open(path , """w""" ) as f:
        json.dump(data , f , indent=2 )
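# Short translation-tokenization sketch (illustrative, not part of the original file):
#
#     tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#     model_inputs = tokenizer("Hello world", return_tensors="pt")
#     # pass forced_bos_token_id=tokenizer.get_lang_id("fr") to model.generate() so the
#     # decoder starts with the French language token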
| 309
| 0
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
    TaTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
class TaTokenizerFast( PreTrainedTokenizerFast ):
    '''Fast T5 tokenizer, backed by HuggingFace tokenizers.'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = TaTokenizer

    prefix_tokens: List[int] = []

    def __init__( self , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , **kwargs , ) -> None:
        """simple docstring"""
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F'''<extra_id_{i}>''' for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("""extra_id_""" in str(x)) , additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
                    """ tokens""")
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length) -> int:
        """simple docstring"""
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    """This tokenizer was incorrectly instantiated with a model max length of"""
                    F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
                    """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
                    """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
                    F''' {pretrained_model_name_or_path} automatically truncating your input to'''
                    F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
                    F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
                    """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
                    """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , FutureWarning , )

        return max_model_length
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""")

        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
            logger.info(F'''Copy vocab file to {out_vocab_file}''')

        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_a : List[int] , token_ids_a_pair : Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_a_pair is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_a_pair = token_ids_a_pair + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_a_pair

    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_a_pair : Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        eos = [self.eos_token_id]
        if token_ids_a_pair is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_a_pair + eos) * [0]
    def get_sentinel_tokens( self) -> List[str]:
        """simple docstring"""
        return list(
            set(filter(lambda token: bool(re.search(r"""<extra_id_\d+>""" , token)) is not None , self.additional_special_tokens)))

    def get_sentinel_token_ids( self) -> List[int]:
        """simple docstring"""
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
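# Sentinel-token sketch (illustrative, not part of the original file):
#
#     tok = TaTokenizerFast.from_pretrained("t5-small")
#     tok("The <extra_id_0> walks in <extra_id_1> park").input_ids
#     tok.get_sentinel_tokens()  # the 100 '<extra_id_N>' strings (unordered)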
| 317
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def explicit_euler( ode_func : Callable , ya : float , xa : float , step_size : float , x_end : float ) -> np.ndarray:
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa

    for k in range(n ):
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        x += step_size

    return y
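# One hand-checked step (comment added for clarity, not in the original): for
# dy/dx = y with y(0) = 1 and step_size = 0.01, the first update is
#     y[1] = y[0] + 0.01 * y[0] = 1.01
# and explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 5.0)[-1] approaches e**5
# from below as step_size shrinks.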
if __name__ == "__main__":
import doctest
doctest.testmod()
| 309
| 0
|
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m' , '--pretrained_model_name_or_path' , type=str , default=None , required=True , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
    parser.add_argument(
        '-c' , '--caption' , type=str , default='robotic cat with wings' , help='Text used to generate images.' , )
    parser.add_argument(
        '-n' , '--images_num' , type=int , default=4 , help='How much images to generate.' , )
    parser.add_argument(
        '-s' , '--seed' , type=int , default=42 , help='Seed for random process.' , )
    parser.add_argument(
        '-ci' , '--cuda_id' , type=int , default=0 , help='cuda_id.' , )
    args = parser.parse_args()
    return args
def image_grid( imgs , rows , cols ):
    '''simple docstring'''
    if not len(imgs ) == rows * cols:
        raise ValueError('The specified number of rows and columns are not correct.' )

    w, h = imgs[0].size
    grid = Image.new('RGB' , size=(cols * w, rows * h) )
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs ):
        grid.paste(img , box=(i % cols * w, i // cols * h) )
    return grid
def generate_images( pipeline , prompt="robotic cat with wings" , guidance_scale=7.5 , num_inference_steps=50 , num_images_per_prompt=1 , seed=42 , ):
    '''simple docstring'''
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt , guidance_scale=guidance_scale , num_inference_steps=num_inference_steps , generator=generator , num_images_per_prompt=num_images_per_prompt , ).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows )
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    unet = unet.to(torch.device('cuda', args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
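# Illustrative invocation (script name and model path are placeholders, not from the
# original file; the flags match the parser defined above):
#     python text2images.py -m ./textual_inversion_output -c "robotic cat with wings" -n 4 -s 42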
| 29
|
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def test_send_file_running_as_expected( file , sock ):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f )

    # ===== invoke =====
    send_file(filename="""mytext.txt""" , testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 309
| 0
|
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    lowerCamelCase__ : nn.Module
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list )
    handles: list = field(default_factory=list )

    def _forward_hook( self , m , inputs : Tensor , outputs : Tensor ) -> None:
        # record only leaf modules with learnable structure (convs, batch norms, or
        # modules without submodules)
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )

    def __call__( self , x : Tensor ):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(x )
        [h.remove() for h in self.handles]
        return self

    @property
    def parametrized( self ):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list )
    dest_skip: List = field(default_factory=list )

    def __call__( self , x : Tensor ) -> None:
        """Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input."""
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized

        src_traced = list(filter(lambda m: type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m: type(m ) not in self.dest_skip , dest_traced ) )

        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                F"""Numbers of operations are different. Source module has {len(src_traced )} operations while"""
                F""" destination module has {len(dest_traced )}.""" )

        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(F"""Transfered from={src_m} to={dest_m}""" )
def convert_weight_and_push( name , config , save_directory , push_to_hub = True ):
    '''simple docstring'''
    print(f"""Converting {name}...""" )
    with torch.no_grad():
        from_model = timm.create_model(name , pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model )
        x = torch.randn((1, 3, 2_24, 2_24) )
        module_transfer(x )

    assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."

    checkpoint_name = f"""resnet{"-".join(name.split("resnet" ) )}"""
    print(checkpoint_name )

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add model""" , use_temp_dir=True , )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add image processor""" , use_temp_dir=True , )

        print(f"""Pushed {checkpoint_name}""" )
def convert_weights_and_push( save_directory , model_name = None , push_to_hub = True ):
    '''simple docstring'''
    filename = """imagenet-1k-id2label.json"""
    num_labels = 10_00
    expected_shape = (1, num_labels)

    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}

    labelaid = {v: k for k, v in idalabel.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig , num_labels=num_labels , idalabel=idalabel , labelaid=labelaid )

    names_to_config = {
        """resnet18""": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="""basic""" ),
        """resnet26""": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="""bottleneck""" ),
        """resnet34""": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="""basic""" ),
        """resnet50""": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="""bottleneck""" ),
        """resnet101""": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="""bottleneck""" ),
        """resnet152""": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="""bottleneck""" ),
    }

    if model_name:
        convert_weight_and_push(model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name , config , save_directory , push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
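# Illustrative invocation (script name assumed; flags match the parser above):
#     python convert_resnet_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path ./converted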
| 165
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a_ :
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=3_0 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1_0 , snake_case_=0.02 , snake_case_=None , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : Tuple = image_size
_lowerCAmelCase : int = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : str = is_training
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : str = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : Any = type_sequence_label_size
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : Optional[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : List[Any] = (image_size // patch_size) ** 2
_lowerCAmelCase : Dict = num_patches + 1
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)

        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
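

# A minimal standalone inference sketch (hedged: assumes the facebook/vit-msn-small
# checkpoint is downloadable; it is not part of the test suite above).
if __name__ == "__main__":
    processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
    model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # predicted class index (the classification head may be untuned for pure MSN pretraining)
    print(logits.argmax(-1).item())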
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
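
# Usage sketch (hedged): with the lazy module above, importing the package stays cheap;
# the heavy `modeling_lilt` module is only imported the first time one of its names is
# accessed, e.g.
#
#     from transformers import LiltModel  # triggers the deferred import of modeling_lilt
#     model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")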
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]
    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()
    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
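            # index 7305 selects one fixed speaker x-vector from the CMU Arctic set as a
            # default voice; any 512-dim x-vector from the dataset would work here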
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
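

# Usage sketch (hedged: requires the checkpoints above to be downloadable and `scipy`
# installed; the 16 kHz rate matches SpeechT5's vocoder output):
#
#     tool = TextToSpeechTool()
#     tool.setup()
#     speech = tool("Hello world")
#     import scipy.io.wavfile
#     scipy.io.wavfile.write("speech.wav", rate=16000, data=speech.numpy())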
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(digit_len: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(digit_len):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
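

# Worked check (not in the original script): the four non-trivial two-digit
# digit-cancelling fractions are 16/64, 19/95, 26/65 and 49/98; their product
# reduces to 1/100, so solution() returns the reduced denominator, 100.
if __name__ == "__main__":
    assert is_digit_cancelling(49, 98)  # 49/98 == 4/8 == 1/2 after cancelling the 9s
    assert sorted(fraction_list(2)) == ["16/64", "19/95", "26/65", "49/98"]
    assert solution() == 100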
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
# next, set bias in the state dict
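        # (hedged rationale: the key projection carries no bias in the original vision
        # encoder, so zeros are spliced between q_bias and v_bias to match the fused
        # qkv layout expected by the HF implementation)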
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()
    model_name_to_original = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")
# update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)
    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")
print("""Generating a caption...""" )
_lowerCAmelCase : Optional[int] = """"""
_lowerCAmelCase : Union[str, Any] = tokenizer(_lowerCamelCase , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
_lowerCAmelCase : List[Any] = original_model.generate({"""image""": original_pixel_values} )
_lowerCAmelCase : Dict = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , _lowerCamelCase )
_lowerCAmelCase : int = input_ids.shape[1]
_lowerCAmelCase : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
_lowerCAmelCase : List[str] = [text.strip() for text in output_text]
print("""HF generation:""" , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'nielsr/{model_name}' )
hf_model.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
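
# Example invocation (hedged: the script filename is illustrative; running it requires
# the LAVIS fork noted at the top and enough GPU/CPU memory for the chosen checkpoint):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b \
#       --push_to_hub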
'''simple docstring'''
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config

        self.set_stage_and_offload()
    def set_stage_and_offload(self):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True
    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)
    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)
    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
class DeepSpeedEngineWrapper:
    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.
class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class DummyOptim:
    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
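

# Usage sketch (hedged: a quick demonstration of the dotted-key lookups above, using
# an in-memory config dict rather than a JSON file):
if __name__ == "__main__":
    ds_config = {
        "zero_optimization": {
            "stage": 3,
            "offload_param": {"device": "cpu"},
            "offload_optimizer": {"device": "none"},
        }
    }
    cfg = HfDeepSpeedConfig(ds_config)
    assert cfg.get_value("zero_optimization.stage") == 3
    assert cfg.is_zero3() and cfg.is_offload()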
'''simple docstring'''
import argparse
import os
import re
UpperCamelCase_ = """src/diffusers"""
# Pattern that looks at the indentation in a line.
UpperCamelCase_ = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCamelCase_ = re.compile(r"""\[([^\]]+)\]""")
def get_indent(line):
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str]="" , _lowerCamelCase : str=None , _lowerCamelCase : List[Any]=None ) -> str:
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Tuple = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(_lowerCamelCase ):
index += 1
_lowerCAmelCase : List[Any] = ["""\n""".join(lines[:index] )]
else:
_lowerCAmelCase : List[str] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase : Union[str, Any] = [lines[index]]
index += 1
while index < len(_lowerCamelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_lowerCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(_lowerCamelCase ) )
if index < len(_lowerCamelCase ) - 1:
_lowerCAmelCase : Union[str, Any] = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : Dict = []
else:
blocks.append("""\n""".join(_lowerCamelCase ) )
_lowerCAmelCase : Tuple = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_lowerCamelCase ) > 0:
blocks.append("""\n""".join(_lowerCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCamelCase ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
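

# Illustrative only (not in the original script): on a one-line statement such as
#   _import_structure["models.bert"].extend(["BertModel", "BertConfig"])
# sort_objects_in_import returns the same statement with the bracketed names sorted,
# i.e. ["BertConfig", "BertModel"].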
def sort_imports(file, check_only=True):
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
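

# Quick illustration (a hedged example, not part of the original script): constants
# sort first, then classes, then functions, each group alphabetically with
# underscores ignored, e.g.
#   sort_objects(["load_tool", "GPT2Model", "CONFIG_NAME", "logging"])
#   -> ["CONFIG_NAME", "GPT2Model", "load_tool", "logging"]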
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
UpperCamelCase_ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)