| code (string, 82–54.1k chars) | code_codestyle (int64, 0–699) | style_context (string, 111–35.6k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
import numpy as np


class Cell:
    """A cell in the grid world, tracking its position, parent and A* costs."""

    def __init__(self) -> None:
        self.position = (0, 0)
        self.parent = None
        self.g = 0  # cost of the path from the start to this cell
        self.h = 0  # heuristic estimate of the cost to the goal
        self.f = 0  # total cost: g + h

    def __eq__(self, cell) -> bool:
        return self.position == cell.position

    def show_cell(self) -> None:
        print(self.position)


class Gridworld:
    """A 2D grid of cells with an 8-connected neighbourhood."""

    def __init__(self, world_size=(5, 5)) -> None:
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self) -> None:
        print(self.w)

    def get_neighbours(self, cell):
        """Return the in-bounds neighbours of a cell, parented to it."""
        neighbour_cords = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cords:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world: Gridworld, start: Cell, goal: Cell) -> list:
    """A* search from start to goal; returns the path as a list of positions."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            # skip neighbours that have already been expanded
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2  # squared-distance heuristic
            n.f = n.h + n.g
            # skip the neighbour if a cheaper copy is already queued
            if any(c == n and c.f < n.f for c in _open):
                continue
            _open.append(n)

    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 68 |
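# Usage sketch (illustrative; the 10x10 world size is arbitrary): the same
# search on a larger world returns a list of (x, y) positions from start to goal.
big_world = Gridworld(world_size=(10, 10))
src, dst = Cell(), Cell()
src.position = (0, 0)
dst.position = (9, 9)
print(astar(big_world, src, dst))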
from __future__ import annotations

import bisect


def bisect_left(
    sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1
) -> int:
    """Locate the leftmost insertion point for item in sorted_collection."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(
    sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1
) -> int:
    """Locate the rightmost insertion point for item in sorted_collection."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(
    sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1
) -> None:
    """Insert item into sorted_collection, before any equal items."""
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(
    sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1
) -> None:
    """Insert item into sorted_collection, after any equal items."""
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; returns the index of item, or None if absent."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Binary search using the standard-library bisect module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(
    sorted_collection: list[int], item: int, left: int, right: int
) -> int | None:
    """Recursive binary search on sorted_collection[left..right]."""
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 68 | 1 |
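# Usage sketch (illustrative): the two insertion-point functions differ only
# around equal elements, and the search variants agree on hits and misses.
data = [0, 5, 7, 10, 15]
print(bisect_left(data, 5), bisect_right(data, 5))  # 1 2
print(binary_search(data, 6))  # None -- 6 is absent
print(binary_search_by_recursion(data, 15, 0, len(data) - 1))  # 4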
def prefix_function(input_string: str) -> list:
    """Knuth-Morris-Pratt prefix function: for every index i, the length of
    the longest proper prefix of the string that is also a suffix ending at i."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    """Length of the longest proper prefix that reoccurs as a suffix in the string."""
    return max(prefix_function(input_string))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 68 |
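# Worked example (illustrative): for "aabcdaabc" the prefix function is
# [0, 1, 0, 0, 0, 1, 2, 3, 4] -- the trailing "aabc" matches the leading
# "aabc", so the longest reoccurring prefix has length 4.
print(prefix_function("aabcdaabc"))  # [0, 1, 0, 0, 0, 1, 2, 3, 4]
print(longest_prefix("aabcdaabc"))  # 4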
from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    """Mark the function with a key code so the register can dispatch to it."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """Mark the function with several key codes at once."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """Metaclass that collects every marked method into a `key_handler` registry."""

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Find and run the handler registered for the pressed key, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Apply the KeyHandler metaclass to an existing class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 68 | 1 |
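# Usage sketch: how `mark` and `register` are meant to combine. The menu class
# below is hypothetical (not part of this module). Keys are stored as the int
# codes that `handle_input` compares against, hence `ord("j")`.
@register
class DemoMenu:
    @mark(ord("j"))
    def move_down(cls):
        print("moving down")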
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for the BiT (Big Transfer) model family."""

    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 68 |
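# Usage sketch (standard transformers pattern; BitModel is the matching
# backbone class): build a config and a randomly initialised model from it.
from transformers import BitConfig, BitModel

config = BitConfig(layer_type="bottleneck", global_padding="same")
model = BitModel(config)
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']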
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 68 | 1 |
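# Usage sketch for the pipeline exported above (follows the diffusers
# documentation pattern; the checkpoint id and kwargs are assumptions, check
# your diffusers version):
import torch
from diffusers import ShapEPipeline

pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16).to("cuda")
images = pipe("a shark", guidance_scale=15.0, num_inference_steps=64).images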
from manim import *
class _A ( UpperCamelCase ):
"""simple docstring"""
def _a ( self : int ) -> Dict:
__UpperCAmelCase =Rectangle(height=0.5 , width=0.5 )
__UpperCAmelCase =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__UpperCAmelCase =Rectangle(height=0.25 , width=0.25 )
__UpperCAmelCase =[mem.copy() for i in range(6 )]
__UpperCAmelCase =[mem.copy() for i in range(6 )]
__UpperCAmelCase =VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
__UpperCAmelCase =VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
__UpperCAmelCase =VGroup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
__UpperCAmelCase =Text("""CPU""" , font_size=24 )
__UpperCAmelCase =Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =[mem.copy() for i in range(4 )]
__UpperCAmelCase =VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
__UpperCAmelCase =Text("""GPU""" , font_size=24 )
__UpperCAmelCase =Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
gpu.move_to([-1, -1, 0] )
self.add(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =[mem.copy() for i in range(6 )]
__UpperCAmelCase =VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
__UpperCAmelCase =Text("""Model""" , font_size=24 )
__UpperCAmelCase =Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.add(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =[]
__UpperCAmelCase =[]
for i, rect in enumerate(__SCREAMING_SNAKE_CASE ):
__UpperCAmelCase =fill.copy().set_fill(__SCREAMING_SNAKE_CASE , opacity=0.8 )
target.move_to(__SCREAMING_SNAKE_CASE )
model_arr.append(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__SCREAMING_SNAKE_CASE , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__SCREAMING_SNAKE_CASE )
self.add(*__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =[meta_mem.copy() for i in range(6 )]
__UpperCAmelCase =[meta_mem.copy() for i in range(6 )]
__UpperCAmelCase =VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
__UpperCAmelCase =VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
__UpperCAmelCase =VGroup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
__UpperCAmelCase =Text("""Disk""" , font_size=24 )
__UpperCAmelCase =Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
disk.move_to([-4, -1.25, 0] )
self.add(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__UpperCAmelCase =MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(__SCREAMING_SNAKE_CASE , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =Square(0.3 )
input.set_fill(__SCREAMING_SNAKE_CASE , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , __SCREAMING_SNAKE_CASE , buff=0.5 )
self.play(Write(__SCREAMING_SNAKE_CASE ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=__SCREAMING_SNAKE_CASE , buff=0.02 )
self.play(MoveToTarget(__SCREAMING_SNAKE_CASE ) )
self.play(FadeOut(__SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =Arrow(start=__SCREAMING_SNAKE_CASE , end=__SCREAMING_SNAKE_CASE , color=__SCREAMING_SNAKE_CASE , buff=0.5 )
a.next_to(model_arr[0].get_left() , __SCREAMING_SNAKE_CASE , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
__UpperCAmelCase =MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__SCREAMING_SNAKE_CASE , run_time=3 ) )
__UpperCAmelCase ={"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02}
self.play(
Write(__SCREAMING_SNAKE_CASE ) , Circumscribe(model_arr[0] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(model_cpu_arr[0] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(gpu_rect[0] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
__UpperCAmelCase =a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , __SCREAMING_SNAKE_CASE , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
__UpperCAmelCase =AnimationGroup(
FadeOut(__SCREAMING_SNAKE_CASE , run_time=0.5 ) , MoveToTarget(__SCREAMING_SNAKE_CASE , run_time=0.5 ) , FadeIn(__SCREAMING_SNAKE_CASE , run_time=0.5 ) , lag_ratio=0.2 )
self.play(__SCREAMING_SNAKE_CASE )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
__UpperCAmelCase =0.7
self.play(
Circumscribe(model_arr[i] , **__SCREAMING_SNAKE_CASE ) , Circumscribe(cpu_left_col_base[i] , **__SCREAMING_SNAKE_CASE ) , Circumscribe(cpu_left_col_base[i + 1] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(gpu_rect[0] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(model_arr[i + 1] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(cpu_left_col_base[-1] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(gpu_rect[0] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
__UpperCAmelCase =a_c
__UpperCAmelCase =a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(__SCREAMING_SNAKE_CASE ) , FadeOut(__SCREAMING_SNAKE_CASE , run_time=0.5 ) , )
__UpperCAmelCase =MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__SCREAMING_SNAKE_CASE , run_time=3 ) , MoveToTarget(__SCREAMING_SNAKE_CASE ) )
self.wait()
| 68 |
import unittest

import numpy as np

from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    from transformers.models.roberta.modeling_flax_roberta import (
        FlaxRobertaForCausalLM,
        FlaxRobertaForMaskedLM,
        FlaxRobertaForMultipleChoice,
        FlaxRobertaForQuestionAnswering,
        FlaxRobertaForSequenceClassification,
        FlaxRobertaForTokenClassification,
        FlaxRobertaModel,
    )


class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 68 | 1 |
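# Usage sketch of the Flax classes exercised above (standard transformers API;
# output shape depends on the tokenized input length):
import numpy as np
from transformers import AutoTokenizer, FlaxRobertaModel

tokenizer = AutoTokenizer.from_pretrained("roberta-base")
model = FlaxRobertaModel.from_pretrained("roberta-base")
inputs = tokenizer("Hello world", return_tensors="np")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)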
import argparse
import copy


def generate_neighbours(path):
    """Parse an edge-list file ("node neighbour distance" per line) into a
    dict mapping every node to its list of [neighbour, distance] pairs."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour starting (and ending) at the
    first node named in the file; return the tour and its total distance."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000  # sentinel larger than any edge in the input
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    # replace the sentinel added on the last step with the closing edge
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """Generate every tour obtained by swapping two interior nodes, append
    each tour's total distance as its last element, and sort by distance."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(
    first_solution, distance_of_first_solution, dict_of_neighbours, iters, size
):
    """Tabu search: repeatedly move to the best non-tabu neighbour, keep a
    bounded list of forbidden node swaps, and track the best tour ever seen."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
| 68 |
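# Usage sketch: the expected input is an edge list with one
# "node neighbour distance" triple per line, e.g. a file containing
#
#   a b 20
#   a c 18
#   b c 10
#
# which can be run from the command line, or equivalently (file name
# hypothetical):
from argparse import Namespace

main(Namespace(File="tabudata.txt", Iterations=100, Size=5))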
from __future__ import annotations


def minimum_cost_path(matrix: list[list[int]]) -> int:
    """Return the minimum cost of a path from the top-left to the bottom-right
    corner of the matrix, moving only right or down."""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 68 | 1 |
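# Worked example (illustrative): in the grid below the cheapest monotone path
# is 2 -> 1 -> 1 -> 2, so the function returns 6.
print(minimum_cost_path([[2, 1], [3, 1], [4, 2]]))  # 6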
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    """Build a tiny model, optimizer, scheduler and dataloaders for the tests."""
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    """A cheap scalar fingerprint of the model's parameters."""
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    """Overwrite the model's weights with freshly initialised ones."""
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class _A ( UpperCamelCase ):
"""simple docstring"""
@require_cuda
def _a ( self : List[Any] ) -> Any:
__UpperCAmelCase =Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
__UpperCAmelCase =Accelerator(cpu=__SCREAMING_SNAKE_CASE )
def _a ( self : Dict ) -> Union[str, Any]:
__UpperCAmelCase =Accelerator()
__UpperCAmelCase =GradientState()
assert state.num_steps == 1
__UpperCAmelCase =4
assert state.num_steps == 4
assert state.sync_gradients is True
__UpperCAmelCase =False
assert state.sync_gradients is False
GradientState._reset_state()
def _a ( self : List[str] ) -> Tuple:
__UpperCAmelCase =Accelerator()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =create_components()
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) =accelerator.prepare(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def _a ( self : Any ) -> str:
__UpperCAmelCase =Accelerator()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =create_components()
accelerator.prepare(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def _a ( self : str ) -> Tuple:
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : Union[str, Any] ):
pass
with patch("""torch.cuda.set_device""" , __SCREAMING_SNAKE_CASE ), patch_environment(ACCELERATE_TORCH_DEVICE="""cuda:64""" ):
__UpperCAmelCase =Accelerator()
self.assertEqual(str(accelerator.state.device ) , """cuda:64""" )
def _a ( self : Tuple ) -> str:
__UpperCAmelCase =Accelerator()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =create_components()
accelerator.prepare(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =get_signature(__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__SCREAMING_SNAKE_CASE )
# make sure random weights don't match
load_random_weights(__SCREAMING_SNAKE_CASE )
self.assertTrue(abs(model_signature - get_signature(__SCREAMING_SNAKE_CASE ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(__SCREAMING_SNAKE_CASE )
self.assertTrue(abs(model_signature - get_signature(__SCREAMING_SNAKE_CASE ) ) < 1e-3 )
def _a ( self : Optional[int] ) -> List[Any]:
__UpperCAmelCase =Accelerator()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =create_components()
accelerator.prepare(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =get_signature(__SCREAMING_SNAKE_CASE )
# saving hook
def save_config(__SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[int] ):
__UpperCAmelCase ={"""class_name""": models[0].__class__.__name__}
with open(os.path.join(__SCREAMING_SNAKE_CASE , """data.json""" ) , """w""" ) as f:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# loading hook
def load_config(__SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] ):
with open(os.path.join(__SCREAMING_SNAKE_CASE , """data.json""" ) , """r""" ) as f:
__UpperCAmelCase =json.load(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =config["""class_name"""]
__UpperCAmelCase =accelerator.register_save_state_pre_hook(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =accelerator.register_load_state_pre_hook(__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__SCREAMING_SNAKE_CASE )
# make sure random weights don't match with hooks
load_random_weights(__SCREAMING_SNAKE_CASE )
self.assertTrue(abs(model_signature - get_signature(__SCREAMING_SNAKE_CASE ) ) > 1e-3 )
# random class name to verify correct one is loaded
__UpperCAmelCase ="""random"""
# make sure loaded weights match with hooks
accelerator.load_state(__SCREAMING_SNAKE_CASE )
self.assertTrue(abs(model_signature - get_signature(__SCREAMING_SNAKE_CASE ) ) < 1e-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__SCREAMING_SNAKE_CASE )
# make sure random weights don't match with hooks removed
load_random_weights(__SCREAMING_SNAKE_CASE )
self.assertTrue(abs(model_signature - get_signature(__SCREAMING_SNAKE_CASE ) ) > 1e-3 )
# random class name to verify correct one is loaded
__UpperCAmelCase ="""random"""
# make sure loaded weights match with hooks removed
accelerator.load_state(__SCREAMING_SNAKE_CASE )
self.assertTrue(abs(model_signature - get_signature(__SCREAMING_SNAKE_CASE ) ) < 1e-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def _a ( self : str ) -> List[Any]:
__UpperCAmelCase =Accelerator()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =create_components()
__UpperCAmelCase =None
# This should work
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertTrue(dummy_obj is None )
def _a ( self : Dict ) -> Any:
__UpperCAmelCase =Accelerator()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =create_components()
__UpperCAmelCase =[1, 2, 3]
# This should work
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(
getattr(__SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , """Dummy object should have `_is_accelerate_prepared` set to `True`""" , )
self.assertEqual(
getattr(__SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , """Model is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , """Optimizer is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , """Scheduler is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , """Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , """Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
@slow
@require_bnb
def _a ( self : Union[str, Any] ) -> str:
from transformers import AutoModelForCausalLM
__UpperCAmelCase =AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=__SCREAMING_SNAKE_CASE , device_map={"""""": 0} , )
__UpperCAmelCase =Accelerator()
# This should work
__UpperCAmelCase =accelerator.prepare(__SCREAMING_SNAKE_CASE )
@slow
@require_bnb
def _a ( self : str ) -> str:
from transformers import AutoModelForCausalLM
__UpperCAmelCase =Accelerator()
with init_empty_weights():
__UpperCAmelCase =AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
__UpperCAmelCase =infer_auto_device_map(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ="""cpu"""
__UpperCAmelCase =AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , device_map=__SCREAMING_SNAKE_CASE , load_in_abit=__SCREAMING_SNAKE_CASE , llm_inta_enable_fpaa_cpu_offload=__SCREAMING_SNAKE_CASE )
# This should not work and get value error
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
__UpperCAmelCase =accelerator.prepare(__SCREAMING_SNAKE_CASE )
@slow
@require_bnb
@require_multi_gpu
def _a ( self : str ) -> Any:
from transformers import AutoModelForCausalLM
__UpperCAmelCase ={"""distributed_type""": DistributedType.MULTI_GPU}
with init_empty_weights():
__UpperCAmelCase =AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
__UpperCAmelCase =infer_auto_device_map(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =1
__UpperCAmelCase =AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=__SCREAMING_SNAKE_CASE , device_map=__SCREAMING_SNAKE_CASE , )
__UpperCAmelCase =Accelerator()
# This should not work and get value error
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
__UpperCAmelCase =accelerator.prepare(__SCREAMING_SNAKE_CASE )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def _a ( self : Optional[Any] ) -> str:
from transformers import AutoModelForCausalLM
with init_empty_weights():
__UpperCAmelCase =AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
__UpperCAmelCase =infer_auto_device_map(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =1
__UpperCAmelCase =AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=__SCREAMING_SNAKE_CASE , device_map=__SCREAMING_SNAKE_CASE , )
__UpperCAmelCase =Accelerator()
# This should work
__UpperCAmelCase =accelerator.prepare(__SCREAMING_SNAKE_CASE )
@require_cuda
def _a ( self : str ) -> List[Any]:
__UpperCAmelCase =torch.nn.Linear(10 , 10 )
__UpperCAmelCase =torch.optim.SGD(model.parameters() , lr=0.01 )
__UpperCAmelCase =Accelerator(cpu=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =accelerator.prepare(__SCREAMING_SNAKE_CASE )
| 68 |
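# Usage sketch: the core pattern these tests exercise. `create_components` is
# the helper defined at the top of this file; `Accelerator.prepare` wraps each
# object for the current device setup and registers it for state saving.
from accelerate import Accelerator

accelerator = Accelerator()
model, optimizer, scheduler, train_dl, valid_dl = create_components()
model, optimizer, scheduler, train_dl, valid_dl = accelerator.prepare(
    model, optimizer, scheduler, train_dl, valid_dl
)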
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """Build a ksize x ksize Gabor filter kernel.

    :param ksize: kernel size; bumped to the next odd number if even
    :param sigma: standard deviation of the gaussian envelope
    :param theta: orientation of the filter, in degrees
    :param lambd: wavelength of the sinusoidal factor
    :param gamma: spatial aspect ratio
    :param psi: phase offset
    """
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # fill in each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # rotated x coordinate
            _x = cos_theta * px + sin_theta * py

            # rotated y coordinate
            _y = -sin_theta * px + cos_theta * py

            # fill kernel: gaussian envelope times sinusoidal carrier
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 68 | 1 |
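# Usage sketch: build a single kernel. Even sizes are bumped to the next odd
# number, so both calls below yield a 21x21 kernel; theta is in degrees.
k1 = gabor_filter_kernel(21, 8, theta=45, lambd=10, gamma=0.5, psi=0)
k2 = gabor_filter_kernel(20, 8, theta=45, lambd=10, gamma=0.5, psi=0)
print(k1.shape, k2.shape)  # (21, 21) (21, 21)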
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class _A :
"""simple docstring"""
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=99 , __SCREAMING_SNAKE_CASE : int=13 , __SCREAMING_SNAKE_CASE : Any=7 , __SCREAMING_SNAKE_CASE : Union[str, Any]=9 , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Any=32 , __SCREAMING_SNAKE_CASE : int=5 , __SCREAMING_SNAKE_CASE : Optional[int]=4 , __SCREAMING_SNAKE_CASE : str=37 , __SCREAMING_SNAKE_CASE : Dict=8 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : List[str]=0.002 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0 , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , ) -> List[Any]:
__UpperCAmelCase =parent
__UpperCAmelCase =batch_size
__UpperCAmelCase =encoder_seq_length
__UpperCAmelCase =decoder_seq_length
# For common tests
__UpperCAmelCase =self.decoder_seq_length
__UpperCAmelCase =is_training
__UpperCAmelCase =use_attention_mask
__UpperCAmelCase =use_labels
__UpperCAmelCase =vocab_size
__UpperCAmelCase =hidden_size
__UpperCAmelCase =num_hidden_layers
__UpperCAmelCase =num_attention_heads
__UpperCAmelCase =d_ff
__UpperCAmelCase =relative_attention_num_buckets
__UpperCAmelCase =dropout_rate
__UpperCAmelCase =initializer_factor
__UpperCAmelCase =eos_token_id
__UpperCAmelCase =pad_token_id
__UpperCAmelCase =decoder_start_token_id
__UpperCAmelCase =None
__UpperCAmelCase =decoder_layers
def _a ( self : Dict ) -> List[str]:
return TaConfig.from_pretrained("""google/umt5-base""" )
def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , ) -> Any:
if attention_mask is None:
__UpperCAmelCase =input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__UpperCAmelCase =decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__UpperCAmelCase =torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__SCREAMING_SNAKE_CASE )
if decoder_head_mask is None:
__UpperCAmelCase =torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__SCREAMING_SNAKE_CASE )
if cross_attn_head_mask is None:
__UpperCAmelCase =torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__SCREAMING_SNAKE_CASE )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _a ( self : Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase =ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
__UpperCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
__UpperCAmelCase =input_ids.clamp(self.pad_token_id + 1 )
__UpperCAmelCase =decoder_input_ids.clamp(self.pad_token_id + 1 )
__UpperCAmelCase =self.get_config()
__UpperCAmelCase =config.num_attention_heads
__UpperCAmelCase =self.prepare_inputs_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return config, input_dict
def _a ( self : Optional[Any] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase =self.prepare_config_and_inputs()
return config, inputs_dict
def _a ( self : str ) -> Optional[int]:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _a ( self : Dict ) -> List[Any]:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , ) -> List[Any]:
__UpperCAmelCase =UMTaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(
input_ids=__SCREAMING_SNAKE_CASE , decoder_input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , decoder_attention_mask=__SCREAMING_SNAKE_CASE , )
__UpperCAmelCase =model(input_ids=__SCREAMING_SNAKE_CASE , decoder_input_ids=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =result.last_hidden_state
__UpperCAmelCase =result.past_key_values
__UpperCAmelCase =result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__SCREAMING_SNAKE_CASE ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _a ( self : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , ) -> Dict:
__UpperCAmelCase =UMTaModel(config=__SCREAMING_SNAKE_CASE ).get_decoder().to(__SCREAMING_SNAKE_CASE ).eval()
# first forward pass
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )
self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE ) )
self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE ) + 1 )
__UpperCAmelCase , __UpperCAmelCase =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__UpperCAmelCase =ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
__UpperCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )["""last_hidden_state"""]
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE )["""last_hidden_state"""]
# select random slice
__UpperCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCAmelCase =output_from_no_past[:, -1, random_slice_idx].detach()
__UpperCAmelCase =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any , ) -> Any:
__UpperCAmelCase =UMTaModel(config=__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE ).half().eval()
__UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(__SCREAMING_SNAKE_CASE ).any().item() )
@require_torch
class _A ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
lowerCamelCase : str = (UMTaForConditionalGeneration,) if is_torch_available() else ()
lowerCamelCase : Tuple = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
lowerCamelCase : Dict = True
lowerCamelCase : Optional[Any] = False
lowerCamelCase : List[str] = False
lowerCamelCase : int = True
lowerCamelCase : Optional[Any] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
lowerCamelCase : Dict = [0.8, 0.9]
def _a ( self : int ) -> Any:
__UpperCAmelCase =UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def _a ( self : str ) -> Any:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
__UpperCAmelCase =UMTaModel(config_and_inputs[0] ).to(__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__SCREAMING_SNAKE_CASE , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=__SCREAMING_SNAKE_CASE , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def _a ( self : int ) -> Dict:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__SCREAMING_SNAKE_CASE )
def _a ( self : Dict ) -> Union[str, Any]:
__UpperCAmelCase =["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
__UpperCAmelCase =config_and_inputs[0]
__UpperCAmelCase =UMTaForConditionalGeneration(__SCREAMING_SNAKE_CASE ).eval()
model.to(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ={
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=__SCREAMING_SNAKE_CASE ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__SCREAMING_SNAKE_CASE ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__SCREAMING_SNAKE_CASE ),
}
for attn_name, (name, mask) in zip(__SCREAMING_SNAKE_CASE , head_masking.items() ):
__UpperCAmelCase ={name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__UpperCAmelCase =torch.ones(
config.num_decoder_layers , config.num_heads , device=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=__SCREAMING_SNAKE_CASE , return_dict_in_generate=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
# We check the state of decoder_attentions and cross_attentions just from the last step
__UpperCAmelCase =out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def _a ( self : str ) -> str:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def _a ( self : List[Any] ) -> Dict:
__UpperCAmelCase =UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=__SCREAMING_SNAKE_CASE , legacy=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =[
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
__UpperCAmelCase =tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE ).input_ids
# fmt: off
__UpperCAmelCase =torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(input_ids.to(__SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =[
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
__UpperCAmelCase =tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 68 |
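# Note: the `UMTa*` identifiers above appear to be mangled forms of the UMT5
# classes in transformers. A typical usage of the real API (assumed names,
# check your transformers version):
from transformers import AutoTokenizer, UMT5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")
inputs = tokenizer("A <extra_id_0> walks into a bar.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.batch_decode(outputs))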
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _A :
"""simple docstring"""
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : List[str]=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=[1, 2, 1] , __SCREAMING_SNAKE_CASE : List[Any]=[2, 2, 4] , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Any=2.0 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : int=0.0 , __SCREAMING_SNAKE_CASE : Dict=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : Tuple=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Any=10 , __SCREAMING_SNAKE_CASE : Dict=8 , ) -> List[Any]:
__UpperCAmelCase =parent
__UpperCAmelCase =batch_size
__UpperCAmelCase =image_size
__UpperCAmelCase =patch_size
__UpperCAmelCase =num_channels
__UpperCAmelCase =embed_dim
__UpperCAmelCase =depths
__UpperCAmelCase =num_heads
__UpperCAmelCase =window_size
__UpperCAmelCase =mlp_ratio
__UpperCAmelCase =qkv_bias
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =drop_path_rate
__UpperCAmelCase =hidden_act
__UpperCAmelCase =use_absolute_embeddings
__UpperCAmelCase =patch_norm
__UpperCAmelCase =layer_norm_eps
__UpperCAmelCase =initializer_range
__UpperCAmelCase =is_training
__UpperCAmelCase =scope
__UpperCAmelCase =use_labels
__UpperCAmelCase =type_sequence_label_size
__UpperCAmelCase =encoder_stride
def _a ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase =None
if self.use_labels:
__UpperCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase =self.get_config()
return config, pixel_values, labels
def _a ( self : List[Any] ) -> Optional[Any]:
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _a ( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
__UpperCAmelCase =SwinvaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__UpperCAmelCase =int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _a ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Tuple:
__UpperCAmelCase =SwinvaForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__UpperCAmelCase =1
__UpperCAmelCase =SwinvaForMaskedImageModeling(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
__UpperCAmelCase =self.type_sequence_label_size
__UpperCAmelCase =SwinvaForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : List[str] ) -> Tuple:
__UpperCAmelCase =self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs
__UpperCAmelCase ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _A ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowerCamelCase : Tuple = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Dict = False
lowerCamelCase : Tuple = False
lowerCamelCase : List[str] = False
lowerCamelCase : Tuple = False
def _a ( self : str ) -> str:
__UpperCAmelCase =SwinvaModelTester(self )
__UpperCAmelCase =ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , embed_dim=37 )
def _a ( self : List[Any] ) -> Optional[int]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : str ) -> str:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def _a ( self : Tuple ) -> Tuple:
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def _a ( self : Optional[Any] ) -> int:
pass
def _a ( self : Tuple ) -> int:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def _a ( self : str ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase =[*signature.parameters.keys()]
__UpperCAmelCase =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =True
for model_class in self.all_model_classes:
__UpperCAmelCase =True
__UpperCAmelCase =False
__UpperCAmelCase =True
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
__UpperCAmelCase =len(self.model_tester.depths )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase =True
__UpperCAmelCase =config.window_size**2
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
__UpperCAmelCase =len(__SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__UpperCAmelCase =True
__UpperCAmelCase =True
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
if hasattr(self.model_tester , """num_hidden_states_types""" ):
__UpperCAmelCase =self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__UpperCAmelCase =2
self.assertEqual(out_len + added_hidden_states , len(__SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> int:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.hidden_states
__UpperCAmelCase =getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# Swinv2 has a different seq_length
__UpperCAmelCase =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__UpperCAmelCase =outputs.reshaped_hidden_states
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =reshaped_hidden_states[0].shape
__UpperCAmelCase =(
reshaped_hidden_states[0].view(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _a ( self : str ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _a ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =3
__UpperCAmelCase =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__UpperCAmelCase =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__UpperCAmelCase =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
def _a ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Dict:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def _a ( self : int ) -> Dict:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase =SwinvaModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def _a ( self : Dict ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =_config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(config=__SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class _A ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Tuple ) -> Dict:
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def _a ( self : int ) -> Optional[int]:
__UpperCAmelCase =SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.default_image_processor
__UpperCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__UpperCAmelCase =image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
__UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE )
# verify the logits
__UpperCAmelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
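# Sanity check of the shape math in `create_and_check_model` above, using the
# tester defaults (image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1]):
# the patch grid starts at (32 // 2) ** 2 = 256 tokens, each of the two patch-
# merging stages shrinks it 4x, and the channel width doubles per stage.
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))
assert (expected_seq_len, expected_dim) == (16, 64)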
| 68 | 1 |
class _A :
"""simple docstring"""
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : list ) -> None:
__UpperCAmelCase =set_counts
__UpperCAmelCase =max(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =len(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =[1] * num_sets
__UpperCAmelCase =list(range(__SCREAMING_SNAKE_CASE ) )
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> bool:
__UpperCAmelCase =self.get_parent(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_parent(__SCREAMING_SNAKE_CASE )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
__UpperCAmelCase =0
__UpperCAmelCase =dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
__UpperCAmelCase =self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
__UpperCAmelCase =0
__UpperCAmelCase =src_parent
__UpperCAmelCase =self.set_counts[src_parent]
__UpperCAmelCase =max(self.max_set , __SCREAMING_SNAKE_CASE )
return True
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> int:
if self.parents[disj_set] == disj_set:
return disj_set
__UpperCAmelCase =self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
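# Behavioral sketch of the disjoint-set class above, assuming de-obfuscated names
# `merge(src, dst)` and `get_parent(node)` for its two `_a` methods (the second
# one performs path compression while walking to the root):
# ds = DisjointSet([1, 1, 1])            # three singleton sets, ds.max_set == 1
# ds.merge(0, 1)                         # -> True; merged set now has count 2
# ds.merge(1, 2)                         # -> True; all elements share one root
# ds.get_parent(0) == ds.get_parent(2)   # -> True, and ds.max_set == 3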
| 68 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A = logging.get_logger(__name__)
__A = {"vocab_file": "spiece.model"}
__A = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
__A = {
"AI-Sweden/gpt-sw3-126m": 20_48,
"AI-Sweden/gpt-sw3-350m": 20_48,
"AI-Sweden/gpt-sw3-1.6b": 20_48,
"AI-Sweden/gpt-sw3-6.7b": 20_48,
"AI-Sweden/gpt-sw3-20b": 20_48,
}
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : int = VOCAB_FILES_NAMES
lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = ['input_ids', 'attention_mask']
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None:
__UpperCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
__UpperCAmelCase =kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" you are testing the model, this can safely be ignored""" )
__UpperCAmelCase ="""None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__UpperCAmelCase ="""<|endoftext|>""" if eos_token is None else eos_token
__UpperCAmelCase ="""<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__UpperCAmelCase =unk_token if pad_token is None else pad_token
__UpperCAmelCase =eos_token if bos_token is None else bos_token
else:
__UpperCAmelCase ="""<pad>""" if pad_token is None else pad_token
__UpperCAmelCase ="""<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
__UpperCAmelCase =do_lower_case
__UpperCAmelCase =remove_space
__UpperCAmelCase =keep_accents
__UpperCAmelCase =vocab_file
__UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
# Used for whitespace normalization in input texts
# fmt: off
__UpperCAmelCase ={""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
__UpperCAmelCase =re.compile(
f'''[{"".join(map(__SCREAMING_SNAKE_CASE , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]''' )
def __getstate__( self : Any ) -> str:
__UpperCAmelCase =self.__dict__.copy()
__UpperCAmelCase =None
return state
def __setstate__( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__UpperCAmelCase ={}
__UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def _a ( self : Union[str, Any] ) -> int:
return len(self.sp_model )
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : str ) -> str:
__UpperCAmelCase =self.non_printing_characters_re.sub("""""" , __SCREAMING_SNAKE_CASE )
# Normalize whitespaces
__UpperCAmelCase ="""""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
__UpperCAmelCase =unicodedata.normalize("""NFC""" , __SCREAMING_SNAKE_CASE )
return text
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]:
__UpperCAmelCase =self.preprocess_text(__SCREAMING_SNAKE_CASE )
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str ) -> int:
return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> str:
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
@staticmethod
def _a ( __SCREAMING_SNAKE_CASE : str ) -> str:
return out_string
def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> str:
__UpperCAmelCase =[]
__UpperCAmelCase =""""""
__UpperCAmelCase =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
__UpperCAmelCase =True
__UpperCAmelCase =[]
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string
def _a ( self : Any ) -> Dict[str, int]:
__UpperCAmelCase ={self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase =os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , """wb""" ) as fi:
__UpperCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__UpperCAmelCase =self.preprocess_text(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.sp_model.encode(__SCREAMING_SNAKE_CASE )
else:
__UpperCAmelCase =[self.preprocess_text(__SCREAMING_SNAKE_CASE ) for t in text]
__UpperCAmelCase =self.sp_model.encode(__SCREAMING_SNAKE_CASE )
if return_tensors is True or return_tensors == "pt":
__UpperCAmelCase =torch.tensor(__SCREAMING_SNAKE_CASE )
return token_ids
def _a ( self : str , __SCREAMING_SNAKE_CASE : Union[int, List[int]] ) -> str:
return self.sp_model.decode(__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : "Conversation" ) -> List[int]:
__UpperCAmelCase =[f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
__UpperCAmelCase =(
f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(__SCREAMING_SNAKE_CASE ) + f'''{self.bos_token}Bot:'''
)
return self.encode(text=__SCREAMING_SNAKE_CASE )
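# A standalone sketch of the `preprocess_text` pipeline above: strip non-printing
# control characters, collapse exotic Unicode spaces to a plain " ", then apply
# NFC normalization. The whitespace set below is an illustrative subset only:
import re
import unicodedata

non_printing_re = re.compile(
    f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
)
unicode_spaces = {"\u2003", "\u2009", "\u3000"}

def preprocess_text(text: str) -> str:
    text = non_printing_re.sub("", text)
    text = "".join(ch if ch not in unicode_spaces else " " for ch in text)
    return unicodedata.normalize("NFC", text)

assert preprocess_text("Hej\u2003v\u00e4rlden\u200b!") == "Hej världen!"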
| 68 | 1 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
__A = logging.get_logger(__name__)
__A = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
__A = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
__A = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
__A = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
__A = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
__A = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
__A = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
__A = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
__A = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
__A = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
__A = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
__A = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
__A = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
__A = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
__A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
__A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
__A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
__A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
__A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = FLAX_MODEL_MAPPING
__A = auto_class_update(FlaxAutoModel)
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING
__A = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : int = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
__A = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Dict = FLAX_MODEL_FOR_MASKED_LM_MAPPING
__A = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__A = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Optional[int] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__A = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Any = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
__A = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : List[str] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__A = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : List[Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
__A = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Dict = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
__A = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Tuple = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__A = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
__A = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : List[str] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
__A = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
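# A minimal sketch of the laziness behind `_LazyAutoMapping` above: the tables
# map model types to class *names* only, and the class object is imported on
# first access (a simplified illustration, not the real implementation):
import importlib

class LazyModelMapping:
    def __init__(self, module_name: str, class_names: dict):
        self._module_name = module_name
        self._class_names = class_names  # e.g. {"bert": "FlaxBertModel"}

    def __getitem__(self, model_type: str):
        # heavy framework imports are deferred until a class is actually needed
        module = importlib.import_module(self._module_name)
        return getattr(module, self._class_names[model_type])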
| 68 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__A = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Path , __SCREAMING_SNAKE_CASE : Union[str, None] = None , __SCREAMING_SNAKE_CASE : Union[List[str], None] = None , __SCREAMING_SNAKE_CASE : Union[str, List[str], None] = None , __SCREAMING_SNAKE_CASE : bool = True , ) -> List[str]:
__UpperCAmelCase =[file for file in os.listdir(__SCREAMING_SNAKE_CASE ) if os.path.isfile(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )]
if identifier is not None:
__UpperCAmelCase =[file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for n_ in n_identifier:
__UpperCAmelCase =[file for file in files if n_ not in file]
else:
__UpperCAmelCase =[file for file in files if n_identifier not in file]
__UpperCAmelCase =ignore_files or []
ignore_files.append("""__init__.py""" )
__UpperCAmelCase =[file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , __SCREAMING_SNAKE_CASE )
if only_modules:
__UpperCAmelCase =file.split(""".""" )[0]
try:
__UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =doctest.DocTestSuite(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =unittest.TextTestRunner().run(__SCREAMING_SNAKE_CASE )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f'''{module_identifier} is not a module.''' )
else:
__UpperCAmelCase =doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def _a ( self : Optional[Any] ) -> List[str]:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase ="""modeling"""
__UpperCAmelCase =[
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE , ignore_files=__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase ="""tokenization"""
self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase ="""configuration"""
self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE )
def _a ( self : List[Any] ) -> Tuple:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase =["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(__SCREAMING_SNAKE_CASE , n_identifier=__SCREAMING_SNAKE_CASE )
def _a ( self : Any ) -> Tuple:
__UpperCAmelCase =Path("""docs/source""" )
__UpperCAmelCase =["""favicon.ico"""]
self.analyze_directory(__SCREAMING_SNAKE_CASE , ignore_files=__SCREAMING_SNAKE_CASE , only_modules=__SCREAMING_SNAKE_CASE )
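# For reference, a minimal example of what `doctest.DocTestSuite` collects from
# the modules checked above -- an interactive session embedded in a docstring:
import doctest

def add(a: int, b: int) -> int:
    """Return the sum of two integers.

    >>> add(2, 3)
    5
    """
    return a + b

if __name__ == "__main__":
    doctest.testmod()  # runs the >>> examples and reports any failures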
| 68 | 1 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class _A ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Dict = CpmAntTokenizer
lowerCamelCase : int = False
def _a ( self : Dict ) -> List[str]:
super().setUp()
__UpperCAmelCase =[
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
__UpperCAmelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def _a ( self : Optional[Any] ) -> Optional[int]:
__UpperCAmelCase =CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
__UpperCAmelCase ="""今天天气真好!"""
__UpperCAmelCase =["""今天""", """天气""", """真""", """好""", """!"""]
__UpperCAmelCase =tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase ="""今天天气真好!"""
__UpperCAmelCase =[tokenizer.bos_token] + tokens
__UpperCAmelCase =[6, 9802, 14962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =tokenizer.decode(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
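# The fixture pattern from setUp above, in isolation: a word-level tokenizer test
# only needs a vocab file with one token per line (paths and tokens illustrative):
import os
import tempfile

vocab_tokens = ["<unk>", "<pad>", "今天", "天气", "真", "好", "!"]
with tempfile.TemporaryDirectory() as tmpdir:
    vocab_file = os.path.join(tmpdir, "vocab.txt")
    with open(vocab_file, "w", encoding="utf-8") as writer:
        writer.write("".join(x + "\n" for x in vocab_tokens))
    with open(vocab_file, encoding="utf-8") as reader:
        assert reader.read().splitlines() == vocab_tokens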
| 68 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__A = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def lowercase__ ( A_: int , A_: Optional[Any] , A_: List[str]=None ) -> List[str]:
"""simple docstring"""
if rng is None:
__UpperCAmelCase =random.Random()
__UpperCAmelCase =1
for dim in shape:
total_dims *= dim
__UpperCAmelCase =[]
for _ in range(A_ ):
values.append(rng.randint(0 , vocab_size - 1 ) )
__UpperCAmelCase =np.array(A_ , dtype=jnp.intaa ).reshape(A_ )
return output
def lowercase__ ( A_: List[str] , A_: List[str]=None ) -> Any:
"""simple docstring"""
__UpperCAmelCase =ids_tensor(A_ , vocab_size=2 , rng=A_ )
# make sure that at least one token is attended to for each batch
__UpperCAmelCase =1
return attn_mask
@require_flax
class _A :
"""simple docstring"""
lowerCamelCase : Optional[Any] = None
lowerCamelCase : int = ()
def _a ( self : str ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
__UpperCAmelCase =2
__UpperCAmelCase =inputs["""input_ids"""].shape[-1] // 2
__UpperCAmelCase =inputs["""input_ids"""][:max_batch_size, :sequence_length]
__UpperCAmelCase =jnp.ones_like(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
__UpperCAmelCase =input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
__UpperCAmelCase =config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _a ( self : Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
__UpperCAmelCase =0
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =pt_model_class(__SCREAMING_SNAKE_CASE ).eval()
__UpperCAmelCase =load_flax_weights_in_pytorch_model(__SCREAMING_SNAKE_CASE , flax_model.params )
__UpperCAmelCase =flax_model.generate(__SCREAMING_SNAKE_CASE ).sequences
__UpperCAmelCase =pt_model.generate(torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
__UpperCAmelCase =flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def _a ( self : Optional[int] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Union[str, Any] ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =True
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : List[Any] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
__UpperCAmelCase =2
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Any ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
__UpperCAmelCase =2
__UpperCAmelCase =2
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def _a ( self : Union[str, Any] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =True
__UpperCAmelCase =max_length
__UpperCAmelCase =0.8
__UpperCAmelCase =10
__UpperCAmelCase =0.3
__UpperCAmelCase =1
__UpperCAmelCase =8
__UpperCAmelCase =9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =max_length
__UpperCAmelCase =1
__UpperCAmelCase =8
__UpperCAmelCase =9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Optional[int] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =max_length
__UpperCAmelCase =2
__UpperCAmelCase =1
__UpperCAmelCase =8
__UpperCAmelCase =9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : List[str] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase =attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase =False
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Dict ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase =attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase =True
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Dict ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase =attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase =2
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : int ) -> Any:
__UpperCAmelCase =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
__UpperCAmelCase =FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
__UpperCAmelCase ="""Hello world"""
__UpperCAmelCase =tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , """do_samples""" ):
model.generate(__SCREAMING_SNAKE_CASE , do_samples=__SCREAMING_SNAKE_CASE )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , """foo""" ):
__UpperCAmelCase ={"""foo""": """bar"""}
model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
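# The core pattern exercised throughout the mixin above, in isolation: generate
# eagerly, compile the same function with `jit`, and require identical tokens
# (model and input names are placeholders):
# eager_sequences = model.generate(input_ids, attention_mask=attention_mask).sequences
# jit_generate = jit(model.generate)    # traced and compiled on first call
# jit_sequences = jit_generate(input_ids, attention_mask=attention_mask).sequences
# assert eager_sequences.tolist() == jit_sequences.tolist()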
| 68 | 1 |
from PIL import Image
def lowercase__ ( A_: Image , A_: int ) -> Image:
"""simple docstring"""
__UpperCAmelCase =(259 * (level + 255)) / (255 * (259 - level))
def contrast(A_: int ) -> int:
return int(128 + factor * (c - 128) )
return img.point(A_ )
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change contrast to 170
__A = change_contrast(img, 1_70)
cont_img.save("image_data/lena_high_contrast.png", format="png")
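# Worked check of the contrast factor above for level = 170 (pure arithmetic):
# factor = (259 * 425) / (255 * 89) = 110075 / 22695 ≈ 4.85; mid-grey 128 is the
# fixed point, and values away from 128 are pushed apart (Pillow's point()
# clamps the results to the 0-255 range).
level = 170
factor = (259 * (level + 255)) / (255 * (259 - level))
assert round(factor, 2) == 4.85
assert int(128 + factor * (128 - 128)) == 128  # mid-grey unchanged
assert int(128 + factor * (100 - 128)) < 0     # dark pixel driven to the floor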
| 68 |
from __future__ import annotations
from collections.abc import Iterator
class _A :
"""simple docstring"""
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> None:
__UpperCAmelCase =value
__UpperCAmelCase =None
__UpperCAmelCase =None
class _A :
"""simple docstring"""
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Node ) -> None:
__UpperCAmelCase =tree
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Node | None ) -> int:
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : int ) -> Iterator[int]:
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
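# Usage sketch for the iterator above, with de-obfuscated names Node and
# TreeSum standing in for the two `_A` classes (illustrative only):
#
#       10
#      /  \
#     5    -3        expected sum: 10 + 5 + (-3) = 12
#
# root = Node(10); root.left = Node(5); root.right = Node(-3)
# assert next(iter(TreeSum(root))) == 12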
| 68 | 1 |
class _A :
"""simple docstring"""
def __init__( self : Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase ={}
def _a ( self : str ) -> None:
print(self.vertex )
for i in self.vertex:
print(__SCREAMING_SNAKE_CASE , """ -> """ , """ -> """.join([str(__SCREAMING_SNAKE_CASE ) for j in self.vertex[i]] ) )
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> None:
# check if vertex is already present,
if from_vertex in self.vertex:
self.vertex[from_vertex].append(__SCREAMING_SNAKE_CASE )
else:
# else make a new vertex
__UpperCAmelCase =[to_vertex]
def _a ( self : Tuple ) -> None:
# visited array for storing already visited nodes
__UpperCAmelCase =[False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list ) -> None:
# mark start vertex as visited
__UpperCAmelCase =True
print(__SCREAMING_SNAKE_CASE , end=""" """ )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex[__SCREAMING_SNAKE_CASE]:
if not visited[i]:
self.dfs_recursive(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__A = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
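# An equivalent iterative DFS over the same adjacency dict, useful when recursion
# depth is a concern (names are illustrative; `graph` maps vertex -> neighbor list):
def dfs_iterative(graph: dict, start: int) -> list:
    visited, stack, order = set(), [start], []
    while stack:
        vertex = stack.pop()
        if vertex in visited:
            continue
        visited.add(vertex)
        order.append(vertex)
        # push neighbors in reverse so they pop in insertion order
        stack.extend(reversed(graph.get(vertex, [])))
    return order

assert dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0) == [0, 1, 2, 3]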
| 68 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def lowercase__ ( A_: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase =botoa.client("""iam""" )
__UpperCAmelCase ={
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=A_ , AssumeRolePolicyDocument=json.dumps(A_ , indent=2 ) )
__UpperCAmelCase ={
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=A_ , PolicyName=F'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(A_ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def lowercase__ ( A_: Dict ) -> Any:
"""simple docstring"""
__UpperCAmelCase =botoa.client("""iam""" )
return iam_client.get_role(RoleName=A_ )["Role"]["Arn"]
def lowercase__ ( ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase =_ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , A_ , )
__UpperCAmelCase =None
if credentials_configuration == 0:
__UpperCAmelCase =_ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
__UpperCAmelCase =aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
__UpperCAmelCase =_ask_field("""AWS Access Key ID: """ )
__UpperCAmelCase =aws_access_key_id
__UpperCAmelCase =_ask_field("""AWS Secret Access Key: """ )
__UpperCAmelCase =aws_secret_access_key
__UpperCAmelCase =_ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
__UpperCAmelCase =aws_region
__UpperCAmelCase =_ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , A_ , )
if role_management == 0:
__UpperCAmelCase =_ask_field("""Enter your IAM role name: """ )
else:
__UpperCAmelCase ="""accelerate_sagemaker_execution_role"""
print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(A_ )
__UpperCAmelCase =_ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =None
if is_custom_docker_image:
__UpperCAmelCase =_ask_field("""Enter your Docker image: """ , lambda A_ : str(A_ ).lower() )
__UpperCAmelCase =_ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =None
if is_sagemaker_inputs_enabled:
__UpperCAmelCase =_ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda A_ : str(A_ ).lower() , )
__UpperCAmelCase =_ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =None
if is_sagemaker_metrics_enabled:
__UpperCAmelCase =_ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda A_ : str(A_ ).lower() , )
__UpperCAmelCase =_ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
__UpperCAmelCase ={}
__UpperCAmelCase =_ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
if use_dynamo:
__UpperCAmelCase ="""dynamo_"""
__UpperCAmelCase =_ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
__UpperCAmelCase =_ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
if use_custom_options:
__UpperCAmelCase =_ask_options(
"""Which mode do you want to use?""" , A_ , lambda A_ : TORCH_DYNAMO_MODES[int(A_ )] , default="""default""" , )
__UpperCAmelCase =_ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =_ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase ="""Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
__UpperCAmelCase =_ask_options(
A_ , A_ , lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__UpperCAmelCase =_ask_field(A_ , lambda A_ : str(A_ ).lower() , default="""ml.p3.2xlarge""" )
__UpperCAmelCase =1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__UpperCAmelCase =_ask_field(
"""How many machines do you want use? [1]: """ , A_ , default=1 , )
__UpperCAmelCase =_ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=A_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=A_ , use_cpu=A_ , dynamo_config=A_ , eca_instance_type=A_ , profile=A_ , region=A_ , iam_role_name=A_ , mixed_precision=A_ , num_machines=A_ , sagemaker_inputs_file=A_ , sagemaker_metrics_file=A_ , )
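# The IAM bootstrap above reduces to two standard boto3 calls; a minimal,
# stand-alone sketch (the role name is illustrative, not from this config flow):
import json

import boto3

iam = boto3.client("iam")
trust_policy = {
    "Version": "2012-10-17",
    "Statement": [
        {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
    ],
}
try:
    iam.create_role(RoleName="demo_sagemaker_role", AssumeRolePolicyDocument=json.dumps(trust_policy, indent=2))
except iam.exceptions.EntityAlreadyExistsException:
    print("role demo_sagemaker_role already exists; reusing it")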
| 68 | 1 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__A = logging.getLogger(__name__)
class _A ( UpperCamelCase ):
"""simple docstring"""
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Optional[int]=None ) -> List[str]:
__UpperCAmelCase =self.layer[current_layer](__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , head_mask[current_layer] )
__UpperCAmelCase =layer_outputs[0]
return hidden_states
@add_start_docstrings(
'The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.' , UpperCamelCase , )
class _A ( UpperCamelCase ):
"""simple docstring"""
def __init__( self : int , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]:
super().__init__(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =BertEncoderWithPabee(__SCREAMING_SNAKE_CASE )
self.init_weights()
__UpperCAmelCase =0
__UpperCAmelCase =0
__UpperCAmelCase =0
__UpperCAmelCase =0
def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> int:
__UpperCAmelCase =threshold
def _a ( self : Any , __SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
__UpperCAmelCase =patience
def _a ( self : Optional[Any] ) -> Tuple:
__UpperCAmelCase =0
__UpperCAmelCase =0
def _a ( self : List[str] ) -> str:
__UpperCAmelCase =self.inference_layers_num / self.inference_instances_num
__UpperCAmelCase =(
f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(__SCREAMING_SNAKE_CASE )
@add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE )
def _a ( self : int , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : Dict=False , ) -> str:
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
__UpperCAmelCase =input_ids.size()
elif inputs_embeds is not None:
__UpperCAmelCase =inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
__UpperCAmelCase =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__UpperCAmelCase =torch.ones(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE )
if token_type_ids is None:
__UpperCAmelCase =torch.zeros(__SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__UpperCAmelCase =self.get_extended_attention_mask(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# If a 2D or 3D attention mask is provided for the cross-attention,
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =encoder_hidden_states.size()
__UpperCAmelCase =(encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__UpperCAmelCase =torch.ones(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.invert_attention_mask(__SCREAMING_SNAKE_CASE )
else:
__UpperCAmelCase =None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__UpperCAmelCase =self.get_head_mask(__SCREAMING_SNAKE_CASE , self.config.num_hidden_layers )
__UpperCAmelCase =self.embeddings(
input_ids=__SCREAMING_SNAKE_CASE , position_ids=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , inputs_embeds=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =embedding_output
if self.training:
__UpperCAmelCase =[]
for i in range(self.config.num_hidden_layers ):
__UpperCAmelCase =self.encoder.adaptive_forward(
__SCREAMING_SNAKE_CASE , current_layer=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.pooler(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =output_layers[i](output_dropout(__SCREAMING_SNAKE_CASE ) )
res.append(__SCREAMING_SNAKE_CASE )
elif self.patience == 0: # Use all layers for inference
__UpperCAmelCase =self.encoder(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , )
__UpperCAmelCase =self.pooler(encoder_outputs[0] )
__UpperCAmelCase =[output_layers[self.config.num_hidden_layers - 1](__SCREAMING_SNAKE_CASE )]
else:
__UpperCAmelCase =0
__UpperCAmelCase =None
__UpperCAmelCase =0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__UpperCAmelCase =self.encoder.adaptive_forward(
__SCREAMING_SNAKE_CASE , current_layer=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.pooler(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =output_layers[i](__SCREAMING_SNAKE_CASE )
if regression:
__UpperCAmelCase =logits.detach()
if patient_result is not None:
__UpperCAmelCase =patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__UpperCAmelCase =0
else:
__UpperCAmelCase =logits.detach().argmax(dim=1 )
if patient_result is not None:
__UpperCAmelCase =patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(__SCREAMING_SNAKE_CASE ) ):
patient_counter += 1
else:
__UpperCAmelCase =0
__UpperCAmelCase =logits
if patient_counter == self.patience:
break
__UpperCAmelCase =[patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ' , UpperCamelCase , )
class _A ( UpperCamelCase ):
"""simple docstring"""
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Any ) -> Dict:
super().__init__(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =config.num_labels
__UpperCAmelCase =BertModelWithPabee(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =nn.Dropout(config.hidden_dropout_prob )
__UpperCAmelCase =nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE )
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : int=None , ) -> List[Any]:
__UpperCAmelCase =self.bert(
input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , position_ids=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , inputs_embeds=__SCREAMING_SNAKE_CASE , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__UpperCAmelCase =(logits[-1],)
if labels is not None:
__UpperCAmelCase =None
__UpperCAmelCase =0
for ix, logits_item in enumerate(__SCREAMING_SNAKE_CASE ):
if self.num_labels == 1:
# We are doing regression
__UpperCAmelCase =MSELoss()
__UpperCAmelCase =loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__UpperCAmelCase =CrossEntropyLoss()
__UpperCAmelCase =loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__UpperCAmelCase =loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__UpperCAmelCase =(total_loss / total_weights,) + outputs
return outputs
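# The inference branch above implements patience-based early exit: stop once
# the per-layer prediction has been stable for `patience` consecutive layers.
# A minimal sketch with made-up per-layer logits (illustrative values only):
def patient_exit(per_layer_logits: list, patience: int) -> tuple:
    previous, counter = None, 0
    for layer_index, logits in enumerate(per_layer_logits):
        prediction = max(range(len(logits)), key=logits.__getitem__)  # argmax
        counter = counter + 1 if prediction == previous else 0
        previous = prediction
        if counter == patience:
            return prediction, layer_index + 1   # exited early at this layer
    return previous, len(per_layer_logits)       # used every layer

pred, layers_used = patient_exit(
    [[0.2, 0.8], [0.1, 0.9], [0.3, 0.7], [0.2, 0.8], [0.6, 0.4]], patience=2
)
print(pred, layers_used)  # 1 3 -- stable for two layers, exited after layer 3 of 5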
| 68 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Tuple = 'ctrl'
lowerCamelCase : Any = ['past_key_values']
lowerCamelCase : Optional[int] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=246534 , __SCREAMING_SNAKE_CASE : int=256 , __SCREAMING_SNAKE_CASE : Optional[Any]=1280 , __SCREAMING_SNAKE_CASE : Optional[Any]=8192 , __SCREAMING_SNAKE_CASE : int=48 , __SCREAMING_SNAKE_CASE : Union[str, Any]=16 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=1e-6 , __SCREAMING_SNAKE_CASE : List[str]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , **__SCREAMING_SNAKE_CASE : int , ) -> Any:
__UpperCAmelCase =vocab_size
__UpperCAmelCase =n_positions
__UpperCAmelCase =n_embd
__UpperCAmelCase =n_layer
__UpperCAmelCase =n_head
__UpperCAmelCase =dff
__UpperCAmelCase =resid_pdrop
__UpperCAmelCase =embd_pdrop
__UpperCAmelCase =layer_norm_epsilon
__UpperCAmelCase =initializer_range
__UpperCAmelCase =use_cache
super().__init__(**__SCREAMING_SNAKE_CASE )
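# `attribute_map` lets canonical transformers names resolve to CTRL-specific
# ones (e.g. `hidden_size` -> `n_embd`). A toy re-implementation of the idea,
# not the actual library code:
class AliasedConfig:
    _aliases = {"hidden_size": "n_embd", "num_hidden_layers": "n_layer"}

    def __init__(self, n_embd: int, n_layer: int) -> None:
        self.n_embd = n_embd
        self.n_layer = n_layer

    def __getattr__(self, name: str):
        if name in self._aliases:
            return getattr(self, self._aliases[name])
        raise AttributeError(name)

config = AliasedConfig(n_embd=1280, n_layer=48)
print(config.hidden_size, config.num_hidden_layers)  # 1280 48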
| 68 | 1 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__A = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
__A = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
__A = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
__A = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
__A = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
__A = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
__A = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def lowercase__ ( ) -> str:
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase =randrange(len(A_ ) ), randrange(len(A_ ) )
__UpperCAmelCase =["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
__UpperCAmelCase , __UpperCAmelCase =SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def lowercase__ ( A_: int = 100 ) -> Any:
"""simple docstring"""
return (generate_random_hand() for _ in range(A_ ))
@pytest.mark.parametrize("""hand, expected""" , A_ )
def lowercase__ ( A_: int , A_: List[str] ) -> Tuple:
"""simple docstring"""
assert PokerHand(A_ )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , A_ )
def lowercase__ ( A_: Any , A_: int ) -> List[Any]:
"""simple docstring"""
assert PokerHand(A_ )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , A_ )
def lowercase__ ( A_: List[Any] , A_: str , A_: Tuple ) -> Tuple:
"""simple docstring"""
__UpperCAmelCase =PokerHand(A_ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , A_ )
def lowercase__ ( A_: Tuple , A_: Tuple ) -> Union[str, Any]:
"""simple docstring"""
assert PokerHand(A_ )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , A_ )
def lowercase__ ( A_: int , A_: Tuple ) -> List[Any]:
"""simple docstring"""
assert PokerHand(A_ )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , A_ )
def lowercase__ ( A_: Any , A_: Optional[int] , A_: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
assert PokerHand(A_ ).compare_with(PokerHand(A_ ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def lowercase__ ( A_: Tuple , A_: str , A_: str ) -> Union[str, Any]:
"""simple docstring"""
assert PokerHand(A_ ).compare_with(PokerHand(A_ ) ) == expected
def lowercase__ ( ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase =[PokerHand(A_ ) for hand in SORTED_HANDS]
__UpperCAmelCase =poker_hands.copy()
shuffle(A_ )
__UpperCAmelCase =chain(sorted(A_ ) )
for index, hand in enumerate(A_ ):
assert hand == poker_hands[index]
def lowercase__ ( ) -> Dict:
"""simple docstring"""
__UpperCAmelCase =[PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
pokerhands.sort(reverse=A_ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def lowercase__ ( ) -> int:
"""simple docstring"""
__UpperCAmelCase =PokerHand("""2C 4S AS 3D 5C""" )
__UpperCAmelCase =True
__UpperCAmelCase =[5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def lowercase__ ( ) -> str:
"""simple docstring"""
__UpperCAmelCase =0
__UpperCAmelCase =os.path.abspath(os.path.dirname(A_ ) )
__UpperCAmelCase =os.path.join(A_ , """poker_hands.txt""" )
with open(A_ ) as file_hand:
for line in file_hand:
__UpperCAmelCase =line[:14].strip()
__UpperCAmelCase =line[15:].strip()
__UpperCAmelCase , __UpperCAmelCase =PokerHand(A_ ), PokerHand(A_ )
__UpperCAmelCase =player.compare_with(A_ )
if output == "Win":
answer += 1
assert answer == 376
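# The random-hand generator above picks its expected label with an indexing
# trick: (play >= oppo) + (play > oppo) evaluates to 0 for a loss, 1 for a
# tie, and 2 for a win, matching the ["Loss", "Tie", "Win"] list. A quick check:
for play, oppo in ((1, 5), (4, 4), (7, 2)):
    print(["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)])
# Loss
# Tie
# Win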
| 68 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def lowercase__ ( A_: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__UpperCAmelCase =k.replace(A_ , A_ )
if k.startswith("""encoder""" ):
__UpperCAmelCase =k.replace(""".attn""" , """.self_attn""" )
__UpperCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" )
__UpperCAmelCase =k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
__UpperCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" )
__UpperCAmelCase =k.replace("""norm2""" , """encoder_attn_layer_norm""" )
__UpperCAmelCase =k.replace("""norm3""" , """final_layer_norm""" )
return k
def lowercase__ ( A_: Tuple ) -> str:
"""simple docstring"""
__UpperCAmelCase =[
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
__UpperCAmelCase =sd.pop(A_ )
__UpperCAmelCase =k.replace("""layernorm_embedding""" , """layer_norm""" )
assert new_k not in sd
__UpperCAmelCase =v
__A = ["START"]
@torch.no_grad()
def lowercase__ ( A_: List[Any] , A_: str , A_: int ) -> Optional[int]:
"""simple docstring"""
__UpperCAmelCase =torch.load(A_ , map_location="""cpu""" )
__UpperCAmelCase =model["""model"""]
__UpperCAmelCase =BlenderbotConfig.from_json_file(A_ )
__UpperCAmelCase =BlenderbotForConditionalGeneration(A_ )
__UpperCAmelCase =m.model.state_dict().keys()
__UpperCAmelCase =[]
__UpperCAmelCase ={}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__UpperCAmelCase =rename_state_dict_key(A_ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__UpperCAmelCase =v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(A_ )
m.model.load_state_dict(A_ , strict=A_ )
m.half()
m.save_pretrained(A_ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
__A = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
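# The conversion hinges on sequential pattern replacement; a stand-alone trace
# of the renaming logic on one ParlAI-style key (the key itself is illustrative):
patterns = [["attention", "attn"], ["q_lin", "q_proj"]]
key = "encoder.layers.0.attention.q_lin.weight"
for parlai_name, hf_name in patterns:
    key = key.replace(parlai_name, hf_name)
if key.startswith("encoder"):
    key = key.replace(".attn", ".self_attn")
print(key)  # encoder.layers.0.self_attn.q_proj.weight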
| 68 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Tuple = 'vit_mae'
def __init__( self : str , __SCREAMING_SNAKE_CASE : List[Any]=768 , __SCREAMING_SNAKE_CASE : Dict=12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=12 , __SCREAMING_SNAKE_CASE : int=3072 , __SCREAMING_SNAKE_CASE : List[str]="gelu" , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , __SCREAMING_SNAKE_CASE : Dict=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : List[str]=1e-12 , __SCREAMING_SNAKE_CASE : Any=224 , __SCREAMING_SNAKE_CASE : str=16 , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[str]=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=512 , __SCREAMING_SNAKE_CASE : Dict=8 , __SCREAMING_SNAKE_CASE : Dict=2048 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.75 , __SCREAMING_SNAKE_CASE : Any=False , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> Optional[Any]:
super().__init__(**__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =hidden_size
__UpperCAmelCase =num_hidden_layers
__UpperCAmelCase =num_attention_heads
__UpperCAmelCase =intermediate_size
__UpperCAmelCase =hidden_act
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =initializer_range
__UpperCAmelCase =layer_norm_eps
__UpperCAmelCase =image_size
__UpperCAmelCase =patch_size
__UpperCAmelCase =num_channels
__UpperCAmelCase =qkv_bias
__UpperCAmelCase =decoder_num_attention_heads
__UpperCAmelCase =decoder_hidden_size
__UpperCAmelCase =decoder_num_hidden_layers
__UpperCAmelCase =decoder_intermediate_size
__UpperCAmelCase =mask_ratio
__UpperCAmelCase =norm_pix_loss
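# With the defaults above, the MAE masking arithmetic works out as follows
# (a quick check, separate from the config class itself):
image_size, patch_size, mask_ratio = 224, 16, 0.75
num_patches = (image_size // patch_size) ** 2  # 14 * 14 = 196 patches
num_masked = int(mask_ratio * num_patches)     # 147 patches hidden from the encoder
print(num_patches, num_masked)                 # 196 147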
| 68 |
from itertools import permutations
def lowercase__ ( A_: tuple ) -> bool:
"""simple docstring"""
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__UpperCAmelCase =[7, 11, 13, 17]
for i, test in enumerate(A_ ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowercase__ ( A_: int = 10 ) -> int:
"""simple docstring"""
return sum(
int("""""".join(map(A_ , A_ ) ) )
for num in permutations(range(A_ ) )
if is_substring_divisible(A_ ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 68 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
__A = None
__A = logging.get_logger(__name__)
__A = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
__A = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
__A = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
__A = "▁"
# Segments (not really needed)
__A = 0
__A = 1
__A = 2
__A = 3
__A = 4
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Tuple = VOCAB_FILES_NAMES
lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Any = 'left'
lowerCamelCase : int = XLNetTokenizer
def __init__( self : Any , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : Optional[Any]="<s>" , __SCREAMING_SNAKE_CASE : Optional[int]="</s>" , __SCREAMING_SNAKE_CASE : Union[str, Any]="<unk>" , __SCREAMING_SNAKE_CASE : List[str]="<sep>" , __SCREAMING_SNAKE_CASE : Optional[int]="<pad>" , __SCREAMING_SNAKE_CASE : Tuple="<cls>" , __SCREAMING_SNAKE_CASE : Tuple="<mask>" , __SCREAMING_SNAKE_CASE : Optional[Any]=["<eop>", "<eod>"] , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> List[Any]:
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase =AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
vocab_file=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__UpperCAmelCase =3
__UpperCAmelCase =do_lower_case
__UpperCAmelCase =remove_space
__UpperCAmelCase =keep_accents
__UpperCAmelCase =vocab_file
__UpperCAmelCase =False if not self.vocab_file else True
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase =[self.sep_token_id]
__UpperCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase =[self.sep_token_id]
__UpperCAmelCase =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase =os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
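# The two builders above encode XLNet's layout: real tokens first, then <sep>,
# with <cls> at the very end (the reverse of BERT's leading [CLS]). A toy
# trace with placeholder ids (the SEP/CLS values are illustrative, not vocab ids):
SEP, CLS = 900, 901
tokens_a, tokens_b = [11, 12, 13], [21, 22]
print(tokens_a + [SEP] + [CLS])                     # [11, 12, 13, 900, 901]
print(tokens_a + [SEP] + tokens_b + [SEP] + [CLS])  # [11, 12, 13, 900, 21, 22, 900, 901]
# token_type_ids mirror that shape: 0s for segment A and its <sep>, 1s for
# segment B and its <sep>, and segment id 2 for the trailing <cls>.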
| 68 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
__A = TypeVar("T")
def lowercase__ ( A_: int ) -> int:
"""simple docstring"""
return (position - 1) // 2
def lowercase__ ( A_: int ) -> int:
"""simple docstring"""
return (2 * position) + 1
def lowercase__ ( A_: int ) -> int:
"""simple docstring"""
return (2 * position) + 2
class _A ( Generic[T] ):
"""simple docstring"""
def __init__( self : List[str] ) -> None:
__UpperCAmelCase =[]
__UpperCAmelCase ={}
__UpperCAmelCase =0
def __len__( self : str ) -> int:
return self.elements
def __repr__( self : Dict ) -> str:
return str(self.heap )
def _a ( self : Optional[int] ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
__UpperCAmelCase =self.elements
self.elements += 1
self._bubble_up(__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[int] ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
__UpperCAmelCase , __UpperCAmelCase =self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__UpperCAmelCase , __UpperCAmelCase =self.heap[0]
self._bubble_down(__SCREAMING_SNAKE_CASE )
return elem
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
# Update the weight of the given key
__UpperCAmelCase =self.position_map[elem]
__UpperCAmelCase =(elem, weight)
if position > 0:
__UpperCAmelCase =get_parent_position(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase =self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(__SCREAMING_SNAKE_CASE )
else:
self._bubble_down(__SCREAMING_SNAKE_CASE )
else:
self._bubble_down(__SCREAMING_SNAKE_CASE )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : T ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
__UpperCAmelCase =self.position_map[elem]
if curr_pos == 0:
return None
__UpperCAmelCase =get_parent_position(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase =self.heap[curr_pos]
__UpperCAmelCase , __UpperCAmelCase =self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_up(__SCREAMING_SNAKE_CASE )
return None
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
__UpperCAmelCase =self.position_map[elem]
__UpperCAmelCase , __UpperCAmelCase =self.heap[curr_pos]
__UpperCAmelCase =get_child_left_position(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =get_child_right_position(__SCREAMING_SNAKE_CASE )
if child_left_position < self.elements and child_right_position < self.elements:
__UpperCAmelCase , __UpperCAmelCase =self.heap[child_left_position]
__UpperCAmelCase , __UpperCAmelCase =self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_down(__SCREAMING_SNAKE_CASE )
if child_left_position < self.elements:
__UpperCAmelCase , __UpperCAmelCase =self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_down(__SCREAMING_SNAKE_CASE )
else:
return None
if child_right_position < self.elements:
__UpperCAmelCase , __UpperCAmelCase =self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_down(__SCREAMING_SNAKE_CASE )
return None
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> None:
# Swap the nodes at the given positions
__UpperCAmelCase =self.heap[nodea_pos][0]
__UpperCAmelCase =self.heap[nodea_pos][0]
__UpperCAmelCase , __UpperCAmelCase =(
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__UpperCAmelCase =nodea_pos
__UpperCAmelCase =nodea_pos
class _A ( Generic[T] ):
"""simple docstring"""
def __init__( self : List[Any] ) -> None:
__UpperCAmelCase ={}
__UpperCAmelCase =0
def __repr__( self : Tuple ) -> str:
return str(self.connections )
def __len__( self : str ) -> int:
return self.nodes
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : T ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
__UpperCAmelCase ={}
self.nodes += 1
def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(__SCREAMING_SNAKE_CASE )
self.add_node(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =weight
__UpperCAmelCase =weight
def lowercase__ ( A_: GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
"""simple docstring"""
__UpperCAmelCase ={node: maxsize for node in graph.connections}
__UpperCAmelCase ={node: None for node in graph.connections}
__UpperCAmelCase =MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(A_ , A_ )
if priority_queue.is_empty():
return dist, parent
# initialization
__UpperCAmelCase =priority_queue.extract_min()
__UpperCAmelCase =0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__UpperCAmelCase =dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(A_ , dist[neighbour] )
__UpperCAmelCase =node
# running prim's algorithm
while not priority_queue.is_empty():
__UpperCAmelCase =priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__UpperCAmelCase =dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(A_ , dist[neighbour] )
__UpperCAmelCase =node
return dist, parent
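# The same minimum-spanning-tree computation fits in a few lines with the
# standard-library heap, using lazy deletion instead of the decrease-key queue
# above; the demo graph is illustrative:
import heapq

def prim(graph: dict, start) -> dict:
    parent, seen = {start: None}, set()
    heap = [(0, start, None)]
    while heap:
        weight, node, via = heapq.heappop(heap)
        if node in seen:
            continue                       # stale entry; node already in the tree
        seen.add(node)
        parent[node] = via
        for neighbour, w in graph[node].items():
            if neighbour not in seen:
                heapq.heappush(heap, (w, neighbour, node))
    return parent

demo = {"a": {"b": 3, "c": 1}, "b": {"a": 3, "c": 7}, "c": {"a": 1, "b": 7}}
print(prim(demo, "a"))  # {'a': None, 'c': 'a', 'b': 'a'}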
| 68 | 1 |
def lowercase__ ( A_: int , A_: int ) -> int:
"""simple docstring"""
return 1 if input_a == input_a else 0
def lowercase__ ( ) -> None:
"""simple docstring"""
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
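# For single bits the same gate is one XOR away: XNOR(a, b) == 1 - (a ^ b).
# A self-contained check over all four rows of the truth table:
for a in (0, 1):
    for b in (0, 1):
        assert (1 if a == b else 0) == 1 - (a ^ b)
print("truth table verified")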
| 68 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
__A = logging.get_logger(__name__)
@dataclass
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self : Any , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
__UpperCAmelCase =deprecated_arg[3:]
__UpperCAmelCase =not kwargs.pop(__SCREAMING_SNAKE_CASE )
logger.warning(
f'''{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'''
f''' {positive_arg}={kwargs[positive_arg]}''' )
__UpperCAmelCase =kwargs.pop("""tpu_name""" , self.tpu_name )
__UpperCAmelCase =kwargs.pop("""device_idx""" , self.device_idx )
__UpperCAmelCase =kwargs.pop("""eager_mode""" , self.eager_mode )
__UpperCAmelCase =kwargs.pop("""use_xla""" , self.use_xla )
super().__init__(**__SCREAMING_SNAKE_CASE )
lowerCamelCase : str = field(
default=UpperCamelCase , metadata={'help': 'Name of TPU'} , )
lowerCamelCase : int = field(
default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , )
lowerCamelCase : bool = field(default=UpperCamelCase , metadata={'help': 'Benchmark models in eager model.'} )
lowerCamelCase : bool = field(
default=UpperCamelCase , metadata={
'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'
} , )
@cached_property
def _a ( self : List[str] ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ["""tf"""] )
__UpperCAmelCase =None
if self.tpu:
try:
if self.tpu_name:
__UpperCAmelCase =tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
__UpperCAmelCase =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
__UpperCAmelCase =None
return tpu
@cached_property
def _a ( self : Tuple ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ["""tf"""] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
__UpperCAmelCase =tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
__UpperCAmelCase =tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' )
else:
tf.config.set_visible_devices([] , """GPU""" ) # disable GPU
__UpperCAmelCase =tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' )
return strategy
@property
def _a ( self : Optional[Any] ) -> bool:
requires_backends(self , ["""tf"""] )
return self._setup_tpu is not None
@property
def _a ( self : str ) -> "tf.distribute.Strategy":
requires_backends(self , ["""tf"""] )
return self._setup_strategy
@property
def _a ( self : Dict ) -> Optional[int]:
requires_backends(self , ["""tf"""] )
return tf.config.list_physical_devices("""GPU""" )
@property
def _a ( self : List[str] ) -> int:
requires_backends(self , ["""tf"""] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def _a ( self : List[str] ) -> bool:
return self.n_gpu > 0
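# The strategy selection above reduces to a handful of public TF calls; a
# minimal CPU-only sketch (no TPU or GPU assumed):
import tensorflow as tf

tf.config.set_visible_devices([], "GPU")                     # benchmark on CPU only
strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
with strategy.scope():                                       # variables created here follow the strategy
    layer = tf.keras.layers.Dense(4)
print(strategy.num_replicas_in_sync)                         # 1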
| 68 | 1 |
| 68 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _A ( unittest.TestCase ):
"""simple docstring"""
@property
def _a ( self : List[str] ) -> Dict:
torch.manual_seed(0 )
__UpperCAmelCase =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def _a ( self : int ) -> Union[str, Any]:
__UpperCAmelCase =self.dummy_uncond_unet
__UpperCAmelCase =ScoreSdeVeScheduler()
__UpperCAmelCase =ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
sde_ve.to(__SCREAMING_SNAKE_CASE )
sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =torch.manual_seed(0 )
__UpperCAmelCase =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =torch.manual_seed(0 )
__UpperCAmelCase =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )[
0
]
__UpperCAmelCase =image[0, -3:, -3:, -1]
__UpperCAmelCase =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[int] ) -> int:
__UpperCAmelCase ="""google/ncsnpp-church-256"""
__UpperCAmelCase =UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =ScoreSdeVeScheduler.from_pretrained(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
sde_ve.to(__SCREAMING_SNAKE_CASE )
sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =torch.manual_seed(0 )
__UpperCAmelCase =sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__UpperCAmelCase =np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
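# The assertions above are plain max-absolute-difference checks on a 3x3
# corner slice; equivalently, with toy arrays:
import numpy as np

a = np.array([0.0, 1.0, 0.0])
b = np.array([0.004, 0.998, 0.001])
print(np.abs(a - b).max() < 1e-2)    # True
print(np.allclose(a, b, atol=1e-2))  # True -- the idiomatic spelling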
| 68 | 1 |
from __future__ import annotations
__A = 8.9_88E9 # units = N * m^2 * C^-2
def lowercase__ ( A_: float , A_: float , A_: float , A_: float ) -> dict[str, float]:
"""simple docstring"""
__UpperCAmelCase =abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if distance < 0:
raise ValueError("""Distance cannot be negative""" )
if force == 0:
__UpperCAmelCase =COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
__UpperCAmelCase =abs(A_ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
__UpperCAmelCase =abs(A_ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
__UpperCAmelCase =(COULOMBS_CONSTANT * charge_product / abs(A_ )) ** 0.5
return {"distance": distance}
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
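# Plugging numbers into the formula above: two 1 C charges 1 m apart feel
# F = k * |q1 * q2| / d**2 = 8.988e9 * 1 / 1 = 8.988e9 N (a quick check):
k = 8.988e9
force = k * abs(1.0 * 1.0) / (1.0**2)
print(f"{force:.3e} N")  # 8.988e+09 N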
| 68 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__A = logging.get_logger(__name__)
__A = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class _A ( UpperCamelCase ):
"""simple docstring"""
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=None , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if config is None:
assert isinstance(self.model , __SCREAMING_SNAKE_CASE ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f''' {self.model.__class__}'''
)
__UpperCAmelCase =self.model.config
else:
__UpperCAmelCase =config
__UpperCAmelCase =data_args
__UpperCAmelCase =self.config.tgt_vocab_size if isinstance(self.config , __SCREAMING_SNAKE_CASE ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
__UpperCAmelCase =torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
__UpperCAmelCase =label_smoothed_nll_loss
def _a ( self : Any , __SCREAMING_SNAKE_CASE : int ) -> Any:
if self.optimizer is None:
__UpperCAmelCase =["""bias""", """LayerNorm.weight"""]
__UpperCAmelCase =[
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
__UpperCAmelCase =Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
__UpperCAmelCase =Adafactor
__UpperCAmelCase ={"""scale_parameter""": False, """relative_step""": False}
else:
__UpperCAmelCase =AdamW
__UpperCAmelCase ={
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
__UpperCAmelCase =self.args.learning_rate
if self.sharded_ddp:
__UpperCAmelCase =OSS(
params=__SCREAMING_SNAKE_CASE , optim=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
else:
__UpperCAmelCase =optimizer_cls(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if self.lr_scheduler is None:
__UpperCAmelCase =self._get_lr_scheduler(__SCREAMING_SNAKE_CASE )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
__UpperCAmelCase =arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
__UpperCAmelCase =schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
__UpperCAmelCase =schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
__UpperCAmelCase =schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__SCREAMING_SNAKE_CASE )
return scheduler
def _a ( self : Optional[Any] ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
__UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[0]
__UpperCAmelCase =self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
__UpperCAmelCase , __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[:2]
else:
# compute label smoothed loss
__UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[0]
__UpperCAmelCase =torch.nn.functional.log_softmax(__SCREAMING_SNAKE_CASE , dim=-1 )
__UpperCAmelCase , __UpperCAmelCase =self.loss_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
__UpperCAmelCase =inputs.pop("""labels""" )
__UpperCAmelCase , __UpperCAmelCase =self._compute_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return loss
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : nn.Module , __SCREAMING_SNAKE_CASE : Dict[str, Union[torch.Tensor, Any]] , __SCREAMING_SNAKE_CASE : bool , __SCREAMING_SNAKE_CASE : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
__UpperCAmelCase =self._prepare_inputs(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ={
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
__UpperCAmelCase =self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__SCREAMING_SNAKE_CASE , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
__UpperCAmelCase =self._pad_tensors_to_max_len(__SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] )
__UpperCAmelCase =inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
__UpperCAmelCase , __UpperCAmelCase =self._compute_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
__UpperCAmelCase =generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
__UpperCAmelCase =self._pad_tensors_to_max_len(__SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int ) -> List[Any]:
# If PAD token is not defined at least EOS token has to be defined
__UpperCAmelCase =self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
f''' padded to `max_length`={max_length}''' )
__UpperCAmelCase =pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
__UpperCAmelCase =tensor
return padded_tensor
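# Illustrative sketch (not part of the original file) of what the imported
# `label_smoothed_nll_loss` computes: blend the negative log-likelihood of the
# target token with a uniform penalty over the whole vocabulary, zeroing out
# padded positions. The function name and signature below are assumptions.
import torch

def label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index):
    # lprobs: (N, vocab_size) log-probabilities; target: (N,) token ids.
    # Assumes ignore_index is a valid vocabulary id (e.g. pad_token_id), so
    # gather is safe even on padded positions before they are masked out.
    target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
    eps_i = epsilon / lprobs.size(-1)
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss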
| 68 | 1 |
from statistics import mean, stdev
def lowercase__ ( A_: list , A_: int = 3 ) -> list:
"""simple docstring"""
__UpperCAmelCase =min(A_ )
__UpperCAmelCase =max(A_ )
# normalize data
return [round((x - x_min) / (x_max - x_min) , A_ ) for x in data]
def lowercase__ ( A_: list , A_: int = 3 ) -> list:
"""simple docstring"""
__UpperCAmelCase =mean(A_ )
__UpperCAmelCase =stdev(A_ )
# standardize data
return [round((x - mu) / sigma , A_ ) for x in data]
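# Quick check (illustrative; reuses the statistics import above): min-max
# normalization maps values into [0, 1], z-score standardization yields mean 0
# and unit (sample) standard deviation.
data = [2, 4, 6]
x_min, x_max = min(data), max(data)
assert [(x - x_min) / (x_max - x_min) for x in data] == [0.0, 0.5, 1.0]
mu, sigma = mean(data), stdev(data)  # mu = 4, sigma = 2
assert [(x - mu) / sigma for x in data] == [-1.0, 0.0, 1.0]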
| 68 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _A ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : List[Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str=0 ) -> Any:
__UpperCAmelCase =floats_tensor((1, 3, 128, 128) , rng=random.Random(__SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =np.random.RandomState(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ={
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""strength""": 0.75,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self : Optional[Any] ) -> int:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _a ( self : Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : Optional[Any] ) -> Dict:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
# warmup pass to apply optimizations
__UpperCAmelCase =pipe(**self.get_dummy_inputs() )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : List[Any] ) -> List[str]:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : Union[str, Any] ) -> Dict:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _A ( unittest.TestCase ):
"""simple docstring"""
@property
def _a ( self : List[str] ) -> Optional[int]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _a ( self : Dict ) -> int:
__UpperCAmelCase =ort.SessionOptions()
__UpperCAmelCase =False
return options
def _a ( self : Dict ) -> Any:
__UpperCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__UpperCAmelCase =init_image.resize((768, 512) )
# using the PNDM scheduler by default
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ="""A fantasy landscape, trending on artstation"""
__UpperCAmelCase =np.random.RandomState(0 )
__UpperCAmelCase =pipe(
prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , )
__UpperCAmelCase =output.images
__UpperCAmelCase =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__UpperCAmelCase =np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _a ( self : List[str] ) -> str:
__UpperCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__UpperCAmelCase =init_image.resize((768, 512) )
__UpperCAmelCase =LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ="""A fantasy landscape, trending on artstation"""
__UpperCAmelCase =np.random.RandomState(0 )
__UpperCAmelCase =pipe(
prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , )
__UpperCAmelCase =output.images
__UpperCAmelCase =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__UpperCAmelCase =np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 68 | 1 |
import numpy as np
import qiskit
def lowercase__ ( A_: int = 8 , A_: int | None = None ) -> str:
"""simple docstring"""
__UpperCAmelCase =np.random.default_rng(seed=A_ )
# On average only half of the qubits survive basis sifting (Alice's and
# Bob's bases agree with probability 1/2), so we prepare more than we need.
__UpperCAmelCase =6 * key_len
# Measurement basis for Alice's qubits.
__UpperCAmelCase =rng.integers(2 , size=A_ )
# The set of states Alice will prepare.
__UpperCAmelCase =rng.integers(2 , size=A_ )
# Measurement basis for Bob's qubits.
__UpperCAmelCase =rng.integers(2 , size=A_ )
# Quantum Circuit to simulate BB84
__UpperCAmelCase =qiskit.QuantumCircuit(A_ , name="""BB84""" )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(A_ ):
if alice_state[index] == 1:
bbaa_circ.x(A_ )
if alice_basis[index] == 1:
bbaa_circ.h(A_ )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(A_ ):
if bob_basis[index] == 1:
bbaa_circ.h(A_ )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
__UpperCAmelCase =qiskit.Aer.get_backend("""aer_simulator""" )
# A single shot suffices: positions where the bases match give deterministic
# outcomes, and mismatched positions are discarded during sifting anyway.
__UpperCAmelCase =qiskit.execute(A_ , A_ , shots=1 , seed_simulator=A_ )
# Returns the result of measurement.
__UpperCAmelCase =job.result().get_counts(A_ ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
__UpperCAmelCase ="""""".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
A_ , A_ , A_ )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
__UpperCAmelCase =gen_key[:key_len] if len(A_ ) >= key_len else gen_key.ljust(A_ , """0""" )
return key
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
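# Classical sketch (illustrative, no quantum simulation) of the sifting step:
# both parties keep only the positions where their randomly chosen bases
# match; on an ideal channel those positions are guaranteed to agree.
rng = np.random.default_rng(seed=0)
alice_basis = rng.integers(2, size=16)
alice_bits = rng.integers(2, size=16)
bob_basis = rng.integers(2, size=16)
sifted_key = "".join(
    str(bit) for a, b, bit in zip(alice_basis, bob_basis, alice_bits) if a == b
)
print(f"kept {len(sifted_key)} of 16 raw bits: {sifted_key}")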
| 68 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__A = logging.getLogger(__name__)
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = 'sequence-classification'
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Tuple ) -> Optional[Any]:
if type(__SCREAMING_SNAKE_CASE ) == dict:
__UpperCAmelCase =Namespace(**__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =glue_output_modes[hparams.task]
__UpperCAmelCase =glue_tasks_num_labels[hparams.task]
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.mode )
def _a ( self : str , **__SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
return self.model(**__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict ) -> List[Any]:
__UpperCAmelCase ={"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__UpperCAmelCase =batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
__UpperCAmelCase =self(**__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =outputs[0]
__UpperCAmelCase =self.trainer.lr_schedulers[0]["""scheduler"""]
__UpperCAmelCase ={"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _a ( self : Tuple ) -> List[Any]:
__UpperCAmelCase =self.hparams
__UpperCAmelCase =processors[args.task]()
__UpperCAmelCase =processor.get_labels()
for mode in ["train", "dev"]:
__UpperCAmelCase =self._feature_file(__SCREAMING_SNAKE_CASE )
if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , __SCREAMING_SNAKE_CASE )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
__UpperCAmelCase =(
processor.get_dev_examples(args.data_dir )
if mode == """dev"""
else processor.get_train_examples(args.data_dir )
)
__UpperCAmelCase =convert_examples_to_features(
__SCREAMING_SNAKE_CASE , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("""Saving features into cached file %s""" , __SCREAMING_SNAKE_CASE )
torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : bool = False ) -> DataLoader:
__UpperCAmelCase ="""dev""" if mode == """test""" else mode
__UpperCAmelCase =self._feature_file(__SCREAMING_SNAKE_CASE )
logger.info("""Loading features from cached file %s""" , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =torch.load(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =torch.tensor([f.input_ids for f in features] , dtype=torch.long )
__UpperCAmelCase =torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
__UpperCAmelCase =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
__UpperCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
__UpperCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , batch_size=__SCREAMING_SNAKE_CASE , shuffle=__SCREAMING_SNAKE_CASE , )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int ) -> str:
__UpperCAmelCase ={"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__UpperCAmelCase =batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
__UpperCAmelCase =self(**__SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase =outputs[:2]
__UpperCAmelCase =logits.detach().cpu().numpy()
__UpperCAmelCase =inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Any ) -> tuple:
__UpperCAmelCase =torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
__UpperCAmelCase =np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
__UpperCAmelCase =np.argmax(__SCREAMING_SNAKE_CASE , axis=1 )
elif self.hparams.glue_output_mode == "regression":
__UpperCAmelCase =np.squeeze(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =np.concatenate([x["""target"""] for x in outputs] , axis=0 )
__UpperCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
__UpperCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
__UpperCAmelCase ={**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}
__UpperCAmelCase =dict(results.items() )
__UpperCAmelCase =results
return ret, preds_list, out_label_list
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : list ) -> dict:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._eval_end(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> dict:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._eval_end(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _a ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]:
BaseTransformer.add_model_specific_args(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=__SCREAMING_SNAKE_CASE , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--task""" , default="""""" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The GLUE task to run""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=__SCREAMING_SNAKE_CASE , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
def lowercase__ ( ) -> str:
"""simple docstring"""
__UpperCAmelCase =argparse.ArgumentParser()
add_generic_args(A_ , os.getcwd() )
__UpperCAmelCase =GLUETransformer.add_model_specific_args(A_ , os.getcwd() )
__UpperCAmelCase =parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
__UpperCAmelCase =os.path.join(
"""./results""" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
os.makedirs(args.output_dir )
__UpperCAmelCase =GLUETransformer(A_ )
__UpperCAmelCase =generic_train(A_ , A_ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
__UpperCAmelCase =sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=A_ ) )
__UpperCAmelCase =model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(A_ )
if __name__ == "__main__":
main()
| 68 | 1 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def lowercase__ ( A_: str = "isbn/0140328726" ) -> dict:
"""simple docstring"""
__UpperCAmelCase =olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("""/""" ) != 1:
__UpperCAmelCase =F'''{olid} is not a valid Open Library olid'''
raise ValueError(A_ )
return requests.get(F'''https://openlibrary.org/{new_olid}.json''' ).json()
def lowercase__ ( A_: dict ) -> dict:
"""simple docstring"""
__UpperCAmelCase ={
"""title""": """Title""",
"""publish_date""": """Publish date""",
"""authors""": """Authors""",
"""number_of_pages""": """Number of pages:""",
"""first_sentence""": """First sentence""",
"""isbn_10""": """ISBN (10)""",
"""isbn_13""": """ISBN (13)""",
}
__UpperCAmelCase ={better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
__UpperCAmelCase =[
get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""]
]
__UpperCAmelCase =data["""First sentence"""]["""value"""]
for key, value in data.items():
if isinstance(A_ , A_ ):
__UpperCAmelCase =""", """.join(A_ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
__A = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
__A = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print("\n".join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""")
| 68 |
def lowercase__ ( A_: int , A_: int ) -> int:
"""simple docstring"""
return 1 if input_a == input_a else 0
def lowercase__ ( ) -> None:
"""simple docstring"""
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
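# Equivalent formulation (illustrative; uses the `xnor_gate` name from the
# demo above): XNOR is the negation of XOR, so for binary inputs the gate
# matches 1 - (a ^ b).
for a in (0, 1):
    for b in (0, 1):
        assert xnor_gate(a, b) == 1 - (a ^ b)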
| 68 | 1 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def lowercase__ ( ) -> Union[str, Any]:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(A_ ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def lowercase__ ( ) -> int:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def lowercase__ ( ) -> Optional[int]:
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(A_ ):
http_head("""https://huggingface.co""" )
| 68 |
from __future__ import annotations
import bisect
def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> int:
"""simple docstring"""
if hi < 0:
__UpperCAmelCase =len(A_ )
while lo < hi:
__UpperCAmelCase =lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__UpperCAmelCase =mid + 1
else:
__UpperCAmelCase =mid
return lo
def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> int:
"""simple docstring"""
if hi < 0:
__UpperCAmelCase =len(A_ )
while lo < hi:
__UpperCAmelCase =lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__UpperCAmelCase =mid + 1
else:
__UpperCAmelCase =mid
return lo
def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> None:
"""simple docstring"""
sorted_collection.insert(bisect_left(A_ , A_ , A_ , A_ ) , A_ )
def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> None:
"""simple docstring"""
sorted_collection.insert(bisect_right(A_ , A_ , A_ , A_ ) , A_ )
def lowercase__ ( A_: list[int] , A_: int ) -> int | None:
"""simple docstring"""
__UpperCAmelCase =0
__UpperCAmelCase =len(A_ ) - 1
while left <= right:
__UpperCAmelCase =left + (right - left) // 2
__UpperCAmelCase =sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__UpperCAmelCase =midpoint - 1
else:
__UpperCAmelCase =midpoint + 1
return None
def lowercase__ ( A_: list[int] , A_: int ) -> int | None:
"""simple docstring"""
__UpperCAmelCase =bisect.bisect_left(A_ , A_ )
if index != len(A_ ) and sorted_collection[index] == item:
return index
return None
def lowercase__ ( A_: list[int] , A_: int , A_: int , A_: int ) -> int | None:
"""simple docstring"""
if right < left:
return None
__UpperCAmelCase =left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(A_ , A_ , A_ , midpoint - 1 )
else:
return binary_search_by_recursion(A_ , A_ , midpoint + 1 , A_ )
if __name__ == "__main__":
__A = input("Enter numbers separated by comma:\n").strip()
__A = sorted(int(item) for item in user_input.split(","))
__A = int(input("Enter a single number to be found in the list:\n"))
__A = binary_search(collection, target)
if result is None:
print(F"""{target} was not found in {collection}.""")
else:
print(F"""{target} was found at position {result} in {collection}.""")
| 68 | 1 |
def lowercase__ ( A_: int , A_: Tuple ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase =[1]
for i in range(2 , A_ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
__UpperCAmelCase =[]
__UpperCAmelCase =list(range(A_ ) )
# Find permutation
while factorials:
__UpperCAmelCase =factorials.pop()
__UpperCAmelCase , __UpperCAmelCase =divmod(A_ , A_ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
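# Cross-check sketch (illustrative; the readable names below are assumptions):
# decoding k in the factorial number system must agree with the k-th entry of
# itertools.permutations, which enumerates in lexicographic order.
from itertools import permutations

def kth_permutation(k: int, n: int) -> list:
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    permutation, elements = [], list(range(n))
    while factorials:
        number, k = divmod(k, factorials.pop())
        permutation.append(elements.pop(number))
    permutation.append(elements[0])
    return permutation

assert kth_permutation(10, 4) == list(permutations(range(4)))[10]  # [1, 3, 0, 2]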
| 68 |
from typing import List
from .keymap import KEYMAP, get_character
def lowercase__ ( A_: str ) -> str:
"""simple docstring"""
def decorator(A_: int ):
__UpperCAmelCase =getattr(A_ , """handle_key""" , [] )
handle += [key]
setattr(A_ , """handle_key""" , A_ )
return func
return decorator
def lowercase__ ( *A_: List[str] ) -> Optional[int]:
"""simple docstring"""
def decorator(A_: Tuple ):
__UpperCAmelCase =getattr(A_ , """handle_key""" , [] )
handle += keys
setattr(A_ , """handle_key""" , A_ )
return func
return decorator
class _A ( UpperCamelCase ):
"""simple docstring"""
def __new__( cls : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> int:
__UpperCAmelCase =super().__new__(cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if not hasattr(__SCREAMING_SNAKE_CASE , """key_handler""" ):
setattr(__SCREAMING_SNAKE_CASE , """key_handler""" , {} )
setattr(__SCREAMING_SNAKE_CASE , """handle_input""" , KeyHandler.handle_input )
for value in attrs.values():
__UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , """handle_key""" , [] )
for key in handled_keys:
__UpperCAmelCase =value
return new_cls
@staticmethod
def _a ( cls : Dict ) -> List[Any]:
__UpperCAmelCase =get_character()
if char != KEYMAP["undefined"]:
__UpperCAmelCase =ord(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =cls.key_handler.get(__SCREAMING_SNAKE_CASE )
if handler:
__UpperCAmelCase =char
return handler(cls )
else:
return None
def lowercase__ ( cls: str ) -> int:
"""simple docstring"""
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 68 | 1 |
from ..utils import DummyObject, requires_backends
class _A ( metaclass=UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Tuple = ['torch', 'torchsde']
def __init__( self : str , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]:
requires_backends(self , ["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : int , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
requires_backends(cls , ["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : Any , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> str:
requires_backends(cls , ["""torch""", """torchsde"""] )
| 68 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 68 | 1 |
def lowercase__ ( A_: Tuple ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase =len(A_ )
for i in range(length - 1 ):
__UpperCAmelCase =i
for k in range(i + 1 , A_ ):
if collection[k] < collection[least]:
__UpperCAmelCase =k
if least != i:
__UpperCAmelCase , __UpperCAmelCase =(collection[i], collection[least])
return collection
if __name__ == "__main__":
__A = input("Enter numbers separated by a comma:\n").strip()
__A = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
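# Property sketch (illustrative; uses the `selection_sort` name from the demo
# above): after outer pass i the prefix holds the i + 1 smallest items in order.
assert selection_sort([3, 1, 2]) == [1, 2, 3]
assert selection_sort([]) == []
assert selection_sort([5]) == [5]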
| 68 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class _A ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[Any]=99 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : int=5 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : str=37 , __SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : str=512 , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : List[str]=4 , ) -> Optional[Any]:
__UpperCAmelCase =parent
__UpperCAmelCase =batch_size
__UpperCAmelCase =seq_length
__UpperCAmelCase =is_training
__UpperCAmelCase =use_attention_mask
__UpperCAmelCase =use_token_type_ids
__UpperCAmelCase =use_labels
__UpperCAmelCase =vocab_size
__UpperCAmelCase =hidden_size
__UpperCAmelCase =num_hidden_layers
__UpperCAmelCase =num_attention_heads
__UpperCAmelCase =intermediate_size
__UpperCAmelCase =hidden_act
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =max_position_embeddings
__UpperCAmelCase =type_vocab_size
__UpperCAmelCase =type_sequence_label_size
__UpperCAmelCase =initializer_range
__UpperCAmelCase =num_choices
def _a ( self : List[Any] ) -> List[str]:
__UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase =None
if self.use_attention_mask:
__UpperCAmelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase =None
if self.use_token_type_ids:
__UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase =RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _a ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase =self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs
__UpperCAmelCase ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def _a ( self : List[str] ) -> Dict:
__UpperCAmelCase =self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs
__UpperCAmelCase =True
__UpperCAmelCase =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class _A ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : Union[str, Any] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _a ( self : List[Any] ) -> List[str]:
__UpperCAmelCase =FlaxRobertaModelTester(self )
@slow
def _a ( self : Optional[Any] ) -> List[Any]:
for model_class_name in self.all_model_classes:
__UpperCAmelCase =model_class_name.from_pretrained("""roberta-base""" , from_pt=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model(np.ones((1, 1) ) )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
| 68 | 1 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
__A = logging.get_logger(__name__)
class _A ( UpperCamelCase ):
"""simple docstring"""
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__UpperCAmelCase =[label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__( self : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
if len(__SCREAMING_SNAKE_CASE ) == 0 or len(__SCREAMING_SNAKE_CASE ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(__SCREAMING_SNAKE_CASE ) )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__UpperCAmelCase =[sequences]
__UpperCAmelCase =[]
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(__SCREAMING_SNAKE_CASE )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(UpperCamelCase )
class _A ( UpperCamelCase ):
"""simple docstring"""
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple=ZeroShotClassificationArgumentHandler() , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Any ) -> List[str]:
__UpperCAmelCase =args_parser
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _a ( self : Any ) -> Optional[int]:
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _a ( self : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Optional[int]=TruncationStrategy.ONLY_FIRST , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
__UpperCAmelCase =self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
__UpperCAmelCase =self.tokenizer.eos_token
try:
__UpperCAmelCase =self.tokenizer(
__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , )
except Exception as e:
if "too short" in str(__SCREAMING_SNAKE_CASE ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
__UpperCAmelCase =self.tokenizer(
__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _a ( self : Tuple , **__SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
if kwargs.get("""multi_class""" , __SCREAMING_SNAKE_CASE ) is not None:
__UpperCAmelCase =kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
__UpperCAmelCase ={}
if "candidate_labels" in kwargs:
__UpperCAmelCase =self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
__UpperCAmelCase =kwargs["""hypothesis_template"""]
__UpperCAmelCase ={}
if "multi_label" in kwargs:
__UpperCAmelCase =kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__( self : int , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> List[Any]:
if len(__SCREAMING_SNAKE_CASE ) == 0:
pass
elif len(__SCREAMING_SNAKE_CASE ) == 1 and "candidate_labels" not in kwargs:
__UpperCAmelCase =args[0]
else:
raise ValueError(f'''Unable to understand extra arguments {args}''' )
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[int]="This example is {}." ) -> Optional[Any]:
__UpperCAmelCase , __UpperCAmelCase =self._args_parser(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for i, (candidate_label, sequence_pair) in enumerate(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ):
__UpperCAmelCase =self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(__SCREAMING_SNAKE_CASE ) - 1,
**model_input,
}
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> str:
__UpperCAmelCase =inputs["""candidate_label"""]
__UpperCAmelCase =inputs["""sequence"""]
__UpperCAmelCase ={k: inputs[k] for k in self.tokenizer.model_input_names}
__UpperCAmelCase =self.model(**__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ={
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any=False ) -> List[Any]:
__UpperCAmelCase =[outputs["""candidate_label"""] for outputs in model_outputs]
__UpperCAmelCase =[outputs["""sequence"""] for outputs in model_outputs]
__UpperCAmelCase =np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
__UpperCAmelCase =logits.shape[0]
__UpperCAmelCase =len(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =N // n
__UpperCAmelCase =logits.reshape((num_sequences, n, -1) )
if multi_label or len(__SCREAMING_SNAKE_CASE ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
__UpperCAmelCase =self.entailment_id
__UpperCAmelCase =-1 if entailment_id == 0 else 0
__UpperCAmelCase =reshaped_outputs[..., [contradiction_id, entailment_id]]
__UpperCAmelCase =np.exp(__SCREAMING_SNAKE_CASE ) / np.exp(__SCREAMING_SNAKE_CASE ).sum(-1 , keepdims=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
__UpperCAmelCase =reshaped_outputs[..., self.entailment_id]
__UpperCAmelCase =np.exp(__SCREAMING_SNAKE_CASE ) / np.exp(__SCREAMING_SNAKE_CASE ).sum(-1 , keepdims=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
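# Hypothetical usage sketch of the pipeline defined above; the checkpoint name
# is the standard NLI model commonly documented for zero-shot classification.
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "one day I will see the world",
    candidate_labels=["travel", "cooking", "dancing"],
)
print(result["labels"][0], result["scores"][0])  # most likely label first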
| 68 |
from __future__ import annotations
def lowercase__ ( A_: list[list[int]] ) -> int:
"""simple docstring"""
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(A_ ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(A_ ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
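# Worked example (illustrative, self-contained restatement of the DP above):
# each cell becomes the cheapest cost of reaching it moving only right or down,
# so the answer ends up in the bottom-right cell.
def min_path_sum(matrix: list[list[int]]) -> int:
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]

assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7  # path 1-3-1-1-1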
| 68 | 1 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
__A = TypeVar("T")
class _A ( Generic[T] ):
"""simple docstring"""
lowerCamelCase : deque[T] # Cache store of keys
lowerCamelCase : set[T] # References of the keys in cache
lowerCamelCase : int = 10 # Maximum capacity of cache
def __init__( self : Any , __SCREAMING_SNAKE_CASE : int ) -> None:
__UpperCAmelCase =deque()
__UpperCAmelCase =set()
if not n:
__UpperCAmelCase =sys.maxsize
elif n < 0:
raise ValueError("""n should be an integer greater than 0.""" )
else:
__UpperCAmelCase =n
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : T ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
__UpperCAmelCase =self.dq_store.pop()
self.key_reference.remove(__SCREAMING_SNAKE_CASE )
else:
self.dq_store.remove(__SCREAMING_SNAKE_CASE )
self.dq_store.appendleft(__SCREAMING_SNAKE_CASE )
self.key_reference.add(__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[int] ) -> None:
for k in self.dq_store:
print(__SCREAMING_SNAKE_CASE )
def __repr__( self : List[str] ) -> str:
return f'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
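# Alternative sketch (illustrative, not from the original): the same
# most-recent-at-front policy with O(1) refer/evict via collections.OrderedDict,
# avoiding the O(n) deque.remove used above.
from collections import OrderedDict

class FastLRU:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store: OrderedDict = OrderedDict()

    def refer(self, key) -> None:
        if key in self.store:
            self.store.move_to_end(key, last=False)  # bump to most-recent (front)
            return
        if len(self.store) == self.capacity:
            self.store.popitem(last=True)  # evict least-recently used (back)
        self.store[key] = None
        self.store.move_to_end(key, last=False)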
| 68 |
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def lowercase__ ( A_: int , A_: int , A_: int , A_: int , A_: int , A_: int ) -> np.ndarray:
"""simple docstring"""
if (ksize % 2) == 0:
__UpperCAmelCase =ksize + 1
__UpperCAmelCase =np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(A_ ):
for x in range(A_ ):
# distance from center
__UpperCAmelCase =x - ksize // 2
__UpperCAmelCase =y - ksize // 2
# degree to radiant
__UpperCAmelCase =theta / 180 * np.pi
__UpperCAmelCase =np.cos(_theta )
__UpperCAmelCase =np.sin(_theta )
# get kernel x
__UpperCAmelCase =cos_theta * px + sin_theta * py
# get kernel y
__UpperCAmelCase =-sin_theta * px + cos_theta * py
# fill kernel
__UpperCAmelCase =np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__A = imread("../image_data/lena.jpg")
# turn image in gray scale value
__A = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__A = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
__A = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__A = out / out.max() * 2_55
__A = out.astype(np.uinta)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
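# Sanity-check sketch (illustrative, self-contained): at the kernel centre the
# rotated offsets are (0, 0), so the Gabor formula collapses to
# exp(0) * cos(psi), i.e. cos(psi), which is 1.0 when psi = 0.
theta, sigma, lambd, gamma, psi = np.deg2rad(30), 8.0, 10.0, 0.5, 0.0
_x = np.cos(theta) * 0 + np.sin(theta) * 0
_y = -np.sin(theta) * 0 + np.cos(theta) * 0
value = np.exp(-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(
    2 * np.pi * _x / lambd + psi
)
assert np.isclose(value, 1.0)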
| 68 | 1 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A = logging.get_logger(__name__)
__A = {"vocab_file": "spiece.model"}
__A = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
__A = {
"AI-Sweden/gpt-sw3-126m": 20_48,
"AI-Sweden/gpt-sw3-350m": 20_48,
"AI-Sweden/gpt-sw3-1.6b": 20_48,
"AI-Sweden/gpt-sw3-6.7b": 20_48,
"AI-Sweden/gpt-sw3-20b": 20_48,
}
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : int = VOCAB_FILES_NAMES
lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = ['input_ids', 'attention_mask']
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None:
__UpperCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
__UpperCAmelCase =kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" you are testing the model, this can safely be ignored""" )
__UpperCAmelCase ="""None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__UpperCAmelCase ="""<|endoftext|>""" if eos_token is None else eos_token
__UpperCAmelCase ="""<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__UpperCAmelCase =unk_token if pad_token is None else pad_token
__UpperCAmelCase =eos_token if bos_token is None else bos_token
else:
__UpperCAmelCase ="""<pad>""" if pad_token is None else pad_token
__UpperCAmelCase ="""<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
__UpperCAmelCase =do_lower_case
__UpperCAmelCase =remove_space
__UpperCAmelCase =keep_accents
__UpperCAmelCase =vocab_file
__UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
# Used for whitespace normalization in input texts
# fmt: off
__UpperCAmelCase ={""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
__UpperCAmelCase =re.compile(
f'''[{"".join(map(__SCREAMING_SNAKE_CASE , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]''' )
def __getstate__( self : Any ) -> str:
__UpperCAmelCase =self.__dict__.copy()
__UpperCAmelCase =None
return state
def __setstate__( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__UpperCAmelCase ={}
__UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def _a ( self : Union[str, Any] ) -> int:
return len(self.sp_model )
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : str ) -> str:
__UpperCAmelCase =self.non_printing_characters_re.sub("""""" , __SCREAMING_SNAKE_CASE )
# Normalize whitespaces
__UpperCAmelCase ="""""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
__UpperCAmelCase =unicodedata.normalize("""NFC""" , __SCREAMING_SNAKE_CASE )
return text
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]:
__UpperCAmelCase =self.preprocess_text(__SCREAMING_SNAKE_CASE )
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str ) -> int:
return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> str:
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
@staticmethod
def _a ( __SCREAMING_SNAKE_CASE : str ) -> str:
return out_string
def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> str:
__UpperCAmelCase =[]
__UpperCAmelCase =""""""
__UpperCAmelCase =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
__UpperCAmelCase =True
__UpperCAmelCase =[]
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string
def _a ( self : Any ) -> Dict[str, int]:
__UpperCAmelCase ={self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase =os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , """wb""" ) as fi:
__UpperCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__UpperCAmelCase =self.preprocess_text(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.sp_model.encode(__SCREAMING_SNAKE_CASE )
else:
__UpperCAmelCase =[self.preprocess_text(__SCREAMING_SNAKE_CASE ) for t in text]
__UpperCAmelCase =self.sp_model.encode(__SCREAMING_SNAKE_CASE )
if return_tensors is True or return_tensors == "pt":
__UpperCAmelCase =torch.tensor(__SCREAMING_SNAKE_CASE )
return token_ids
def _a ( self : str , __SCREAMING_SNAKE_CASE : Union[int, List[int]] ) -> str:
return self.sp_model.decode(__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : "Conversation" ) -> List[int]:
__UpperCAmelCase =[f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
__UpperCAmelCase =(
f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(__SCREAMING_SNAKE_CASE ) + f'''{self.bos_token}Bot:'''
)
return self.encode(text=__SCREAMING_SNAKE_CASE )
| 68 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _A :
"""simple docstring"""
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : List[str]=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=[1, 2, 1] , __SCREAMING_SNAKE_CASE : List[Any]=[2, 2, 4] , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Any=2.0 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : int=0.0 , __SCREAMING_SNAKE_CASE : Dict=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : Tuple=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Any=10 , __SCREAMING_SNAKE_CASE : Dict=8 , ) -> List[Any]:
__UpperCAmelCase =parent
__UpperCAmelCase =batch_size
__UpperCAmelCase =image_size
__UpperCAmelCase =patch_size
__UpperCAmelCase =num_channels
__UpperCAmelCase =embed_dim
__UpperCAmelCase =depths
__UpperCAmelCase =num_heads
__UpperCAmelCase =window_size
__UpperCAmelCase =mlp_ratio
__UpperCAmelCase =qkv_bias
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =drop_path_rate
__UpperCAmelCase =hidden_act
__UpperCAmelCase =use_absolute_embeddings
__UpperCAmelCase =patch_norm
__UpperCAmelCase =layer_norm_eps
__UpperCAmelCase =initializer_range
__UpperCAmelCase =is_training
__UpperCAmelCase =scope
__UpperCAmelCase =use_labels
__UpperCAmelCase =type_sequence_label_size
__UpperCAmelCase =encoder_stride
def _a ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase =None
if self.use_labels:
__UpperCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase =self.get_config()
return config, pixel_values, labels
def _a ( self : List[Any] ) -> Optional[Any]:
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _a ( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
__UpperCAmelCase =SwinvaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__UpperCAmelCase =int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
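        # Worked example with the tester defaults above (image_size=32, patch_size=2,
        # depths=[1, 2, 1], embed_dim=16): the encoder has len(depths) - 1 = 2
        # downsampling stages, each shrinking the token grid by a factor of 4, so
        #   expected_seq_len = (32 // 2) ** 2 // 4 ** 2 = 256 // 16 = 16
        #   expected_dim     = 16 * 2 ** 2 = 64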
def _a ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Tuple:
__UpperCAmelCase =SwinvaForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__UpperCAmelCase =1
__UpperCAmelCase =SwinvaForMaskedImageModeling(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
__UpperCAmelCase =self.type_sequence_label_size
__UpperCAmelCase =SwinvaForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : List[str] ) -> Tuple:
__UpperCAmelCase =self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs
__UpperCAmelCase ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _A ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowerCamelCase : Tuple = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Dict = False
lowerCamelCase : Tuple = False
lowerCamelCase : List[str] = False
lowerCamelCase : Tuple = False
def _a ( self : str ) -> str:
__UpperCAmelCase =SwinvaModelTester(self )
__UpperCAmelCase =ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , embed_dim=37 )
def _a ( self : List[Any] ) -> Optional[int]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : str ) -> str:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def _a ( self : Tuple ) -> Tuple:
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def _a ( self : Optional[Any] ) -> int:
pass
def _a ( self : Tuple ) -> int:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def _a ( self : str ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase =[*signature.parameters.keys()]
__UpperCAmelCase =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =True
for model_class in self.all_model_classes:
__UpperCAmelCase =True
__UpperCAmelCase =False
__UpperCAmelCase =True
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
__UpperCAmelCase =len(self.model_tester.depths )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase =True
__UpperCAmelCase =config.window_size**2
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
__UpperCAmelCase =len(__SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__UpperCAmelCase =True
__UpperCAmelCase =True
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
if hasattr(self.model_tester , """num_hidden_states_types""" ):
__UpperCAmelCase =self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__UpperCAmelCase =2
self.assertEqual(out_len + added_hidden_states , len(__SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> int:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.hidden_states
__UpperCAmelCase =getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# Swinv2 has a different seq_length
__UpperCAmelCase =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__UpperCAmelCase =outputs.reshaped_hidden_states
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =reshaped_hidden_states[0].shape
__UpperCAmelCase =(
reshaped_hidden_states[0].view(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _a ( self : str ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _a ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =3
__UpperCAmelCase =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__UpperCAmelCase =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__UpperCAmelCase =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
def _a ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Dict:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def _a ( self : int ) -> Dict:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase =SwinvaModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def _a ( self : Dict ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =_config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(config=__SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class _A ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Tuple ) -> Dict:
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def _a ( self : int ) -> Optional[int]:
__UpperCAmelCase =SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.default_image_processor
__UpperCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__UpperCAmelCase =image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
__UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE )
# verify the logits
__UpperCAmelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 68 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working, simple example of how to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
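# A hypothetical launch sequence (the script name below is a placeholder; the
# `accelerate` CLI commands are the usual ones from the README linked above):
#
#   accelerate config                                        # answer the interactive questions once
#   accelerate launch tracking_example.py --with_tracking
#
# A plain `python tracking_example.py` also works for single-process runs.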
__A = 16
__A = 32
def lowercase__ ( A_: Accelerator , A_: int = 16 ) -> Tuple:
"""simple docstring"""
__UpperCAmelCase =AutoTokenizer.from_pretrained("""bert-base-cased""" )
__UpperCAmelCase =load_dataset("""glue""" , """mrpc""" )
def tokenize_function(A_: Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
__UpperCAmelCase =tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=A_ , max_length=A_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__UpperCAmelCase =datasets.map(
A_ , batched=A_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__UpperCAmelCase =tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(A_: Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__UpperCAmelCase =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__UpperCAmelCase =16
elif accelerator.mixed_precision != "no":
__UpperCAmelCase =8
else:
__UpperCAmelCase =None
return tokenizer.pad(
A_ , padding="""longest""" , max_length=A_ , pad_to_multiple_of=A_ , return_tensors="""pt""" , )
# Instantiate dataloaders.
__UpperCAmelCase =DataLoader(
tokenized_datasets["""train"""] , shuffle=A_ , collate_fn=A_ , batch_size=A_ )
__UpperCAmelCase =DataLoader(
tokenized_datasets["""validation"""] , shuffle=A_ , collate_fn=A_ , batch_size=A_ )
return train_dataloader, eval_dataloader
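# Each batch produced by these dataloaders is, roughly, a dict of padded
# tensors such as {"input_ids": (B, L), "attention_mask": (B, L), "labels": (B,)},
# where L is the longest sequence in the batch (or the fixed TPU length chosen above).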
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__A = mocked_dataloaders # noqa: F811
def lowercase__ ( A_: List[str] , A_: List[str] ) -> Dict:
"""simple docstring"""
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , A_ ) == "1":
__UpperCAmelCase =2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: if using a custom `Tracker` class, it should be passed in here, e.g.:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
__UpperCAmelCase =Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir )
else:
__UpperCAmelCase =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCAmelCase =config["""lr"""]
__UpperCAmelCase =int(config["""num_epochs"""] )
__UpperCAmelCase =int(config["""seed"""] )
__UpperCAmelCase =int(config["""batch_size"""] )
set_seed(A_ )
__UpperCAmelCase , __UpperCAmelCase =get_dataloaders(A_ , A_ )
__UpperCAmelCase =evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
__UpperCAmelCase =1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__UpperCAmelCase =batch_size // MAX_GPU_BATCH_SIZE
__UpperCAmelCase =MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCAmelCase =AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=A_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__UpperCAmelCase =model.to(accelerator.device )
# Instantiate optimizer
__UpperCAmelCase =AdamW(params=model.parameters() , lr=A_ )
# Instantiate scheduler
__UpperCAmelCase =get_linear_schedule_with_warmup(
optimizer=A_ , num_warmup_steps=100 , num_training_steps=(len(A_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =accelerator.prepare(
A_ , A_ , A_ , A_ , A_ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
__UpperCAmelCase =os.path.split(A_ )[-1].split(""".""" )[0]
accelerator.init_trackers(A_ , A_ )
# Now we train the model
for epoch in range(A_ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
__UpperCAmelCase =0
for step, batch in enumerate(A_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__UpperCAmelCase =model(**A_ )
__UpperCAmelCase =outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
__UpperCAmelCase =loss / gradient_accumulation_steps
accelerator.backward(A_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(A_ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
__UpperCAmelCase =model(**A_ )
__UpperCAmelCase =outputs.logits.argmax(dim=-1 )
__UpperCAmelCase , __UpperCAmelCase =accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=A_ , references=A_ , )
__UpperCAmelCase =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , A_ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"""accuracy""": eval_metric["""accuracy"""],
"""f1""": eval_metric["""f1"""],
"""train_loss""": total_loss.item() / len(A_ ),
"""epoch""": epoch,
} , step=A_ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowercase__ ( ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase =argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=A_ , default=A_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
parser.add_argument(
"""--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , )
parser.add_argument(
"""--project_dir""" , type=A_ , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , )
__UpperCAmelCase =parser.parse_args()
__UpperCAmelCase ={"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(A_ , A_ )
if __name__ == "__main__":
main()
| 68 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A = logging.get_logger(__name__)
__A = {"vocab_file": "spiece.model"}
__A = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
__A = {
"AI-Sweden/gpt-sw3-126m": 20_48,
"AI-Sweden/gpt-sw3-350m": 20_48,
"AI-Sweden/gpt-sw3-1.6b": 20_48,
"AI-Sweden/gpt-sw3-6.7b": 20_48,
"AI-Sweden/gpt-sw3-20b": 20_48,
}
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : int = VOCAB_FILES_NAMES
lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = ['input_ids', 'attention_mask']
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None:
__UpperCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
__UpperCAmelCase =kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" you are testing the model, this can safely be ignored""" )
__UpperCAmelCase ="""None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__UpperCAmelCase ="""<|endoftext|>""" if eos_token is None else eos_token
__UpperCAmelCase ="""<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__UpperCAmelCase =unk_token if pad_token is None else pad_token
__UpperCAmelCase =eos_token if bos_token is None else bos_token
else:
__UpperCAmelCase ="""<pad>""" if pad_token is None else pad_token
__UpperCAmelCase ="""<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
__UpperCAmelCase =do_lower_case
__UpperCAmelCase =remove_space
__UpperCAmelCase =keep_accents
__UpperCAmelCase =vocab_file
__UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
# Used for whitespace normalization in input texts
# fmt: off
__UpperCAmelCase ={""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
__UpperCAmelCase =re.compile(
f'''[{"".join(map(__SCREAMING_SNAKE_CASE , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]''' )
def __getstate__( self : Any ) -> str:
__UpperCAmelCase =self.__dict__.copy()
__UpperCAmelCase =None
return state
def __setstate__( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__UpperCAmelCase ={}
__UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def _a ( self : Union[str, Any] ) -> int:
return len(self.sp_model )
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : str ) -> str:
__UpperCAmelCase =self.non_printing_characters_re.sub("""""" , __SCREAMING_SNAKE_CASE )
# Normalize whitespaces
__UpperCAmelCase ="""""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
__UpperCAmelCase =unicodedata.normalize("""NFC""" , __SCREAMING_SNAKE_CASE )
return text
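        # e.g., assuming U+00A0 (no-break space) is one of the characters in
        # `self.whitespaces` above:
        #   preprocess_text("Hej\u00a0världen") -> "Hej världen"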
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]:
__UpperCAmelCase =self.preprocess_text(__SCREAMING_SNAKE_CASE )
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str ) -> int:
return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> str:
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
@staticmethod
def _a ( __SCREAMING_SNAKE_CASE : str ) -> str:
return out_string
def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> str:
__UpperCAmelCase =[]
__UpperCAmelCase =""""""
__UpperCAmelCase =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
__UpperCAmelCase =True
__UpperCAmelCase =[]
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string
def _a ( self : Any ) -> Dict[str, int]:
__UpperCAmelCase ={self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase =os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , """wb""" ) as fi:
__UpperCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__UpperCAmelCase =self.preprocess_text(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.sp_model.encode(__SCREAMING_SNAKE_CASE )
else:
__UpperCAmelCase =[self.preprocess_text(__SCREAMING_SNAKE_CASE ) for t in text]
__UpperCAmelCase =self.sp_model.encode(__SCREAMING_SNAKE_CASE )
if return_tensors is True or return_tensors == "pt":
__UpperCAmelCase =torch.tensor(__SCREAMING_SNAKE_CASE )
return token_ids
def _a ( self : str , __SCREAMING_SNAKE_CASE : Union[int, List[int]] ) -> str:
return self.sp_model.decode(__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : "Conversation" ) -> List[int]:
__UpperCAmelCase =[f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
__UpperCAmelCase =(
f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(__SCREAMING_SNAKE_CASE ) + f'''{self.bos_token}Bot:'''
)
return self.encode(text=__SCREAMING_SNAKE_CASE )
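# A minimal usage sketch (an assumption, not part of the module above: it needs
# network access to download the AI-Sweden/gpt-sw3-126m checkpoint and goes
# through AutoTokenizer rather than instantiating the class directly):
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
    ids = tokenizer("Hej, hur mår du?")["input_ids"]
    print(ids)  # token ids produced by the SentencePiece model
    print(tokenizer.decode(ids))  # round-trips back to (normalized) text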
| 68 | 1 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class _A ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[Any]=99 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : int=5 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : str=37 , __SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : str=512 , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : List[str]=4 , ) -> Optional[Any]:
__UpperCAmelCase =parent
__UpperCAmelCase =batch_size
__UpperCAmelCase =seq_length
__UpperCAmelCase =is_training
__UpperCAmelCase =use_attention_mask
__UpperCAmelCase =use_token_type_ids
__UpperCAmelCase =use_labels
__UpperCAmelCase =vocab_size
__UpperCAmelCase =hidden_size
__UpperCAmelCase =num_hidden_layers
__UpperCAmelCase =num_attention_heads
__UpperCAmelCase =intermediate_size
__UpperCAmelCase =hidden_act
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =max_position_embeddings
__UpperCAmelCase =type_vocab_size
__UpperCAmelCase =type_sequence_label_size
__UpperCAmelCase =initializer_range
__UpperCAmelCase =num_choices
def _a ( self : List[Any] ) -> List[str]:
__UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase =None
if self.use_attention_mask:
__UpperCAmelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase =None
if self.use_token_type_ids:
__UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase =RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _a ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase =self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs
__UpperCAmelCase ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def _a ( self : List[str] ) -> Dict:
__UpperCAmelCase =self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs
__UpperCAmelCase =True
__UpperCAmelCase =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class _A ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : Union[str, Any] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _a ( self : List[Any] ) -> List[str]:
__UpperCAmelCase =FlaxRobertaModelTester(self )
@slow
def _a ( self : Optional[Any] ) -> List[Any]:
for model_class_name in self.all_model_classes:
__UpperCAmelCase =model_class_name.from_pretrained("""roberta-base""" , from_pt=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model(np.ones((1, 1) ) )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
| 68 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__A = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Path , __SCREAMING_SNAKE_CASE : Union[str, None] = None , __SCREAMING_SNAKE_CASE : Union[List[str], None] = None , __SCREAMING_SNAKE_CASE : Union[str, List[str], None] = None , __SCREAMING_SNAKE_CASE : bool = True , ) -> List[str]:
__UpperCAmelCase =[file for file in os.listdir(__SCREAMING_SNAKE_CASE ) if os.path.isfile(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )]
if identifier is not None:
__UpperCAmelCase =[file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for n_ in n_identifier:
__UpperCAmelCase =[file for file in files if n_ not in file]
else:
__UpperCAmelCase =[file for file in files if n_identifier not in file]
__UpperCAmelCase =ignore_files or []
ignore_files.append("""__init__.py""" )
__UpperCAmelCase =[file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , __SCREAMING_SNAKE_CASE )
if only_modules:
__UpperCAmelCase =file.split(""".""" )[0]
try:
__UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =doctest.DocTestSuite(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =unittest.TextTestRunner().run(__SCREAMING_SNAKE_CASE )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f'''{module_identifier} is not a module.''' )
else:
__UpperCAmelCase =doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
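    # e.g. self.analyze_directory(Path("src/transformers"), identifier="modeling")
    # doctests every modeling_*.py file. With only_modules=True (the default) each
    # file is imported as a transformers attribute and its DocTestSuite is run;
    # with only_modules=False the raw file is executed via doctest.testfile.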
def _a ( self : Optional[Any] ) -> List[str]:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase ="""modeling"""
__UpperCAmelCase =[
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE , ignore_files=__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase ="""tokenization"""
self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase ="""configuration"""
self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE )
def _a ( self : List[Any] ) -> Tuple:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase =["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(__SCREAMING_SNAKE_CASE , n_identifier=__SCREAMING_SNAKE_CASE )
def _a ( self : Any ) -> Tuple:
__UpperCAmelCase =Path("""docs/source""" )
__UpperCAmelCase =["""favicon.ico"""]
self.analyze_directory(__SCREAMING_SNAKE_CASE , ignore_files=__SCREAMING_SNAKE_CASE , only_modules=__SCREAMING_SNAKE_CASE )
| 68 | 1 |
import re
import string
import numpy as np
import datasets
__A = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
__A = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
__A = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
"""simple docstring"""
def _a ( self : List[str] ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
def _a ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : str=False , ) -> Optional[Any]:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
__UpperCAmelCase =np.array([re.sub(__SCREAMING_SNAKE_CASE , """""" , __SCREAMING_SNAKE_CASE ) for x in predictions] )
__UpperCAmelCase =np.array([re.sub(__SCREAMING_SNAKE_CASE , """""" , __SCREAMING_SNAKE_CASE ) for x in references] )
else:
__UpperCAmelCase =np.asarray(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =np.asarray(__SCREAMING_SNAKE_CASE )
if ignore_case:
__UpperCAmelCase =np.char.lower(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =np.char.lower(__SCREAMING_SNAKE_CASE )
if ignore_punctuation:
__UpperCAmelCase =string.punctuation.maketrans("""""" , """""" , string.punctuation )
__UpperCAmelCase =np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
if ignore_numbers:
__UpperCAmelCase =string.digits.maketrans("""""" , """""" , string.digits )
__UpperCAmelCase =np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =predictions == references
return {"exact_match": np.mean(__SCREAMING_SNAKE_CASE ) * 100}
| 68 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__A = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def lowercase__ ( A_: int , A_: Optional[Any] , A_: List[str]=None ) -> List[str]:
"""simple docstring"""
if rng is None:
__UpperCAmelCase =random.Random()
__UpperCAmelCase =1
for dim in shape:
total_dims *= dim
__UpperCAmelCase =[]
for _ in range(A_ ):
values.append(rng.randint(0 , vocab_size - 1 ) )
__UpperCAmelCase =np.array(A_ , dtype=jnp.intaa ).reshape(A_ )
return output
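# e.g. shape=(2, 4) and vocab_size=10 yield a (2, 4) integer array whose
# entries are drawn uniformly from [0, 10).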
def lowercase__ ( A_: List[str] , A_: List[str]=None ) -> Any:
"""simple docstring"""
__UpperCAmelCase =ids_tensor(A_ , vocab_size=2 , rng=A_ )
# make sure that at least one token is attended to for each batch
__UpperCAmelCase =1
return attn_mask
@require_flax
class _A :
"""simple docstring"""
lowerCamelCase : Optional[Any] = None
lowerCamelCase : int = ()
def _a ( self : str ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
__UpperCAmelCase =2
__UpperCAmelCase =inputs["""input_ids"""].shape[-1] // 2
__UpperCAmelCase =inputs["""input_ids"""][:max_batch_size, :sequence_length]
__UpperCAmelCase =jnp.ones_like(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
__UpperCAmelCase =input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
__UpperCAmelCase =config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _a ( self : Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
__UpperCAmelCase =0
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =pt_model_class(__SCREAMING_SNAKE_CASE ).eval()
__UpperCAmelCase =load_flax_weights_in_pytorch_model(__SCREAMING_SNAKE_CASE , flax_model.params )
__UpperCAmelCase =flax_model.generate(__SCREAMING_SNAKE_CASE ).sequences
__UpperCAmelCase =pt_model.generate(torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
__UpperCAmelCase =flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def _a ( self : Optional[int] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Union[str, Any] ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =True
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : List[Any] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
__UpperCAmelCase =2
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Any ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
__UpperCAmelCase =2
__UpperCAmelCase =2
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def _a ( self : Union[str, Any] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =True
__UpperCAmelCase =max_length
__UpperCAmelCase =0.8
__UpperCAmelCase =10
__UpperCAmelCase =0.3
__UpperCAmelCase =1
__UpperCAmelCase =8
__UpperCAmelCase =9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =max_length
__UpperCAmelCase =1
__UpperCAmelCase =8
__UpperCAmelCase =9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Optional[int] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =max_length
__UpperCAmelCase =2
__UpperCAmelCase =1
__UpperCAmelCase =8
__UpperCAmelCase =9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : List[str] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase =attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase =False
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Dict ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase =attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase =True
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Dict ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase =attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase =2
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : int ) -> Any:
__UpperCAmelCase =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
__UpperCAmelCase =FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
__UpperCAmelCase ="""Hello world"""
__UpperCAmelCase =tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , """do_samples""" ):
model.generate(__SCREAMING_SNAKE_CASE , do_samples=__SCREAMING_SNAKE_CASE )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , """foo""" ):
__UpperCAmelCase ={"""foo""": """bar"""}
model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
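# A minimal sketch of the pattern the tests above exercise: jit-compiling
# `model.generate` and checking the compiled call matches the eager one token
# for token. Shown commented out; `model`, `input_ids` and `attention_mask`
# stand for whatever the harness built above and are assumptions here:
#   from jax import jit
#   eager = model.generate(input_ids, attention_mask=attention_mask).sequences
#   compiled = jit(model.generate)(input_ids, attention_mask=attention_mask).sequences
#   assert eager.tolist() == compiled.tolist()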
| 68 | 1 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--original_config_file",
type=str,
required=True,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--image_size",
default=5_12,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
def lowercase__ ( A_: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'''could not parse string as bool {string}''' )
parser.add_argument(
"--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
)
parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
__A = parser.parse_args()
__A = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
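# Illustrative invocation of the conversion script above (the script file
# name and all paths are hypothetical):
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./controlnet.ckpt \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-converted \
#       --to_safetensors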
| 68 |
from __future__ import annotations
from collections.abc import Iterator
class _A :
"""simple docstring"""
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> None:
__UpperCAmelCase =value
__UpperCAmelCase =None
__UpperCAmelCase =None
class _A :
"""simple docstring"""
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Node ) -> None:
__UpperCAmelCase =tree
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Node | None ) -> int:
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : int ) -> Iterator[int]:
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
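# A minimal standalone sketch of the same depth-first sum (self-contained,
# because the node and tree classes above were both renamed to `_A` by the
# obfuscation, and the second definition shadows the first):
def tree_sum(node):
    # Sum the value at this node plus the sums of both subtrees.
    if node is None:
        return 0
    return node["value"] + tree_sum(node["left"]) + tree_sum(node["right"])

tree_root = {
    "value": 10,
    "left": {"value": 5, "left": None, "right": None},
    "right": {"value": -3, "left": None, "right": None},
}
assert tree_sum(tree_root) == 12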
| 68 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__A = logging.get_logger(__name__)
# TODO: upload to AWS
__A = {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
),
}
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Any = 'retribert'
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : int=30522 , __SCREAMING_SNAKE_CASE : List[Any]=768 , __SCREAMING_SNAKE_CASE : Any=8 , __SCREAMING_SNAKE_CASE : Dict=12 , __SCREAMING_SNAKE_CASE : List[str]=3072 , __SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Dict=512 , __SCREAMING_SNAKE_CASE : List[str]=2 , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : Any=1e-12 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Optional[Any]=128 , __SCREAMING_SNAKE_CASE : str=0 , **__SCREAMING_SNAKE_CASE : List[Any] , ) -> Optional[Any]:
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =vocab_size
__UpperCAmelCase =hidden_size
__UpperCAmelCase =num_hidden_layers
__UpperCAmelCase =num_attention_heads
__UpperCAmelCase =hidden_act
__UpperCAmelCase =intermediate_size
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =max_position_embeddings
__UpperCAmelCase =type_vocab_size
__UpperCAmelCase =initializer_range
__UpperCAmelCase =layer_norm_eps
__UpperCAmelCase =share_encoders
__UpperCAmelCase =projection_dim
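# Hedged usage sketch: upstream this class is RetriBertConfig (renamed `_A`
# here), so the example is shown commented out. Field names follow the
# signature above:
#   config = RetriBertConfig(projection_dim=256, share_encoders=False)
#   config.hidden_size  # -> 768 (the default)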
| 68 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def lowercase__ ( A_: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase =botoa.client("""iam""" )
__UpperCAmelCase ={
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=A_ , AssumeRolePolicyDocument=json.dumps(A_ , indent=2 ) )
__UpperCAmelCase ={
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=A_ , PolicyName=F'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(A_ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def lowercase__ ( A_: Dict ) -> Any:
"""simple docstring"""
__UpperCAmelCase =botoa.client("""iam""" )
return iam_client.get_role(RoleName=A_ )["Role"]["Arn"]
def lowercase__ ( ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase =_ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , A_ , )
__UpperCAmelCase =None
if credentials_configuration == 0:
__UpperCAmelCase =_ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
__UpperCAmelCase =aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
__UpperCAmelCase =_ask_field("""AWS Access Key ID: """ )
__UpperCAmelCase =aws_access_key_id
__UpperCAmelCase =_ask_field("""AWS Secret Access Key: """ )
__UpperCAmelCase =aws_secret_access_key
__UpperCAmelCase =_ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
__UpperCAmelCase =aws_region
__UpperCAmelCase =_ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , A_ , )
if role_management == 0:
__UpperCAmelCase =_ask_field("""Enter your IAM role name: """ )
else:
__UpperCAmelCase ="""accelerate_sagemaker_execution_role"""
print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(A_ )
__UpperCAmelCase =_ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =None
if is_custom_docker_image:
__UpperCAmelCase =_ask_field("""Enter your Docker image: """ , lambda A_ : str(A_ ).lower() )
__UpperCAmelCase =_ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =None
if is_sagemaker_inputs_enabled:
__UpperCAmelCase =_ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda A_ : str(A_ ).lower() , )
__UpperCAmelCase =_ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =None
if is_sagemaker_metrics_enabled:
__UpperCAmelCase =_ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda A_ : str(A_ ).lower() , )
__UpperCAmelCase =_ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
__UpperCAmelCase ={}
__UpperCAmelCase =_ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
if use_dynamo:
__UpperCAmelCase ="""dynamo_"""
__UpperCAmelCase =_ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
__UpperCAmelCase =_ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
if use_custom_options:
__UpperCAmelCase =_ask_options(
"""Which mode do you want to use?""" , A_ , lambda A_ : TORCH_DYNAMO_MODES[int(A_ )] , default="""default""" , )
__UpperCAmelCase =_ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =_ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase ="""Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
__UpperCAmelCase =_ask_options(
A_ , A_ , lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__UpperCAmelCase =_ask_field(A_ , lambda A_ : str(A_ ).lower() , default="""ml.p3.2xlarge""" )
__UpperCAmelCase =1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__UpperCAmelCase =_ask_field(
"""How many machines do you want use? [1]: """ , A_ , default=1 , )
__UpperCAmelCase =_ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=A_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=A_ , use_cpu=A_ , dynamo_config=A_ , eca_instance_type=A_ , profile=A_ , region=A_ , iam_role_name=A_ , mixed_precision=A_ , num_machines=A_ , sagemaker_inputs_file=A_ , sagemaker_metrics_file=A_ , )
| 68 | 1 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
__A = get_logger(__name__)
def lowercase__ ( A_: Any , A_: List[str] , A_: Dict , A_: List[str] , A_: List[Any]=0 ) -> int:
"""simple docstring"""
os.makedirs(A_ , exist_ok=A_ )
with FSDP.state_dict_type(
A_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__UpperCAmelCase =model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__UpperCAmelCase =F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__UpperCAmelCase =os.path.join(A_ , A_ )
if accelerator.process_index == 0:
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(A_ , A_ )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__UpperCAmelCase =(
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__UpperCAmelCase =os.path.join(A_ , A_ )
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(A_ , A_ )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__UpperCAmelCase =os.path.join(A_ , F'''{MODEL_NAME}_{model_index}''' )
os.makedirs(A_ , exist_ok=A_ )
logger.info(F'''Saving model to {ckpt_dir}''' )
__UpperCAmelCase ={"""model""": state_dict}
dist_cp.save_state_dict(
state_dict=A_ , storage_writer=dist_cp.FileSystemWriter(A_ ) , planner=DefaultSavePlanner() , )
logger.info(F'''Model saved to {ckpt_dir}''' )
def lowercase__ ( A_: Optional[int] , A_: Dict , A_: Union[str, Any] , A_: Optional[int] , A_: Union[str, Any]=0 ) -> Union[str, Any]:
"""simple docstring"""
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(A_ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"""Set the `sync_module_states` flag to `True` so that model states are synced across processes when """
"""initializing FSDP object""" )
return
__UpperCAmelCase =F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__UpperCAmelCase =os.path.join(A_ , A_ )
logger.info(F'''Loading model from {input_model_file}''' )
__UpperCAmelCase =torch.load(A_ )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__UpperCAmelCase =(
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__UpperCAmelCase =os.path.join(A_ , A_ )
logger.info(F'''Loading model from {input_model_file}''' )
__UpperCAmelCase =torch.load(A_ )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__UpperCAmelCase =(
os.path.join(A_ , F'''{MODEL_NAME}_{model_index}''' )
if F'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading model from {ckpt_dir}''' )
__UpperCAmelCase ={"""model""": model.state_dict()}
dist_cp.load_state_dict(
state_dict=A_ , storage_reader=dist_cp.FileSystemReader(A_ ) , planner=DefaultLoadPlanner() , )
__UpperCAmelCase =state_dict["""model"""]
logger.info(F'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(A_ )
def lowercase__ ( A_: int , A_: Union[str, Any] , A_: Union[str, Any] , A_: List[str] , A_: List[str] , A_: int=0 ) -> int:
"""simple docstring"""
os.makedirs(A_ , exist_ok=A_ )
with FSDP.state_dict_type(
A_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__UpperCAmelCase =FSDP.optim_state_dict(A_ , A_ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__UpperCAmelCase =(
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__UpperCAmelCase =os.path.join(A_ , A_ )
logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(A_ , A_ )
logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
else:
__UpperCAmelCase =os.path.join(A_ , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(A_ , exist_ok=A_ )
logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(A_ ) , planner=DefaultSavePlanner() , )
logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def lowercase__ ( A_: Optional[Any] , A_: Any , A_: List[Any] , A_: Optional[Any] , A_: Any , A_: int=0 ) -> Any:
"""simple docstring"""
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__UpperCAmelCase =None
        # the check below should work but currently isn't working (mostly a PyTorch issue);
        # in the meantime it is disabled at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__UpperCAmelCase =(
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__UpperCAmelCase =os.path.join(A_ , A_ )
logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
__UpperCAmelCase =torch.load(A_ )
logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
else:
__UpperCAmelCase =(
os.path.join(A_ , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if F'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
__UpperCAmelCase =load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="""optimizer""" , storage_reader=dist_cp.FileSystemReader(A_ ) , )
__UpperCAmelCase =optim_state["""optimizer"""]
logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
__UpperCAmelCase =FSDP.optim_state_dict_to_load(A_ , A_ , A_ )
optimizer.load_state_dict(A_ )
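# Hedged sketch of how these helpers are typically driven from a training
# script. Upstream they are exported as save_fsdp_model / load_fsdp_model /
# save_fsdp_optimizer / load_fsdp_optimizer (an assumption; here the
# obfuscation renamed them all to `lowercase__`, hence the commented form):
#   save_fsdp_model(fsdp_plugin, accelerator, model, output_dir)
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir)
#   load_fsdp_model(fsdp_plugin, accelerator, model, input_dir)
#   load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir)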
| 68 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Tuple = 'ctrl'
lowerCamelCase : Any = ['past_key_values']
lowerCamelCase : Optional[int] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=246534 , __SCREAMING_SNAKE_CASE : int=256 , __SCREAMING_SNAKE_CASE : Optional[Any]=1280 , __SCREAMING_SNAKE_CASE : Optional[Any]=8192 , __SCREAMING_SNAKE_CASE : int=48 , __SCREAMING_SNAKE_CASE : Union[str, Any]=16 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=1e-6 , __SCREAMING_SNAKE_CASE : List[str]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , **__SCREAMING_SNAKE_CASE : int , ) -> Any:
__UpperCAmelCase =vocab_size
__UpperCAmelCase =n_positions
__UpperCAmelCase =n_embd
__UpperCAmelCase =n_layer
__UpperCAmelCase =n_head
__UpperCAmelCase =dff
__UpperCAmelCase =resid_pdrop
__UpperCAmelCase =embd_pdrop
__UpperCAmelCase =layer_norm_epsilon
__UpperCAmelCase =initializer_range
__UpperCAmelCase =use_cache
super().__init__(**__SCREAMING_SNAKE_CASE )
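# Hedged usage sketch: upstream this class is CTRLConfig (renamed `_A` here),
# so it is shown commented out. The `attribute_map` above lets the canonical
# names resolve to the CTRL-specific fields:
#   config = CTRLConfig(n_embd=1280, n_layer=48)
#   config.hidden_size  # -> 1280, routed through attribute_map to n_embd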
| 68 | 1 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__A = logging.get_logger(__name__)
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = ['input_features']
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=80 , __SCREAMING_SNAKE_CASE : List[Any]=16000 , __SCREAMING_SNAKE_CASE : int=160 , __SCREAMING_SNAKE_CASE : Optional[int]=30 , __SCREAMING_SNAKE_CASE : str=400 , __SCREAMING_SNAKE_CASE : List[Any]=0.0 , __SCREAMING_SNAKE_CASE : List[str]=False , **__SCREAMING_SNAKE_CASE : List[str] , ) -> Union[str, Any]:
super().__init__(
feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__UpperCAmelCase =n_fft
__UpperCAmelCase =hop_length
__UpperCAmelCase =chunk_length
__UpperCAmelCase =chunk_length * sampling_rate
__UpperCAmelCase =self.n_samples // hop_length
__UpperCAmelCase =sampling_rate
__UpperCAmelCase =mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm="""slaney""" , mel_scale="""slaney""" , )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : np.array ) -> np.ndarray:
__UpperCAmelCase =spectrogram(
__SCREAMING_SNAKE_CASE , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , )
__UpperCAmelCase =log_spec[:, :-1]
__UpperCAmelCase =np.maximum(__SCREAMING_SNAKE_CASE , log_spec.max() - 8.0 )
__UpperCAmelCase =(log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _a ( __SCREAMING_SNAKE_CASE : List[np.ndarray] , __SCREAMING_SNAKE_CASE : List[np.ndarray] , __SCREAMING_SNAKE_CASE : float = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
__UpperCAmelCase =np.array(__SCREAMING_SNAKE_CASE , np.intaa )
__UpperCAmelCase =[]
for vector, length in zip(__SCREAMING_SNAKE_CASE , attention_mask.sum(-1 ) ):
__UpperCAmelCase =(vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
__UpperCAmelCase =padding_value
normed_input_values.append(__SCREAMING_SNAKE_CASE )
else:
__UpperCAmelCase =[(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "max_length" , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , **__SCREAMING_SNAKE_CASE : Any , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__UpperCAmelCase =isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
__UpperCAmelCase =is_batched_numpy or (
isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__UpperCAmelCase =[np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
__UpperCAmelCase =np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__UpperCAmelCase =raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__UpperCAmelCase =[np.asarray([raw_speech] ).T]
__UpperCAmelCase =BatchFeature({"""input_features""": raw_speech} )
# convert into correct format for padding
__UpperCAmelCase =self.pad(
__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=max_length if max_length else self.n_samples , truncation=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
__UpperCAmelCase =self.zero_mean_unit_var_norm(
padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , )
__UpperCAmelCase =np.stack(padded_inputs["""input_features"""] , axis=0 )
# make sure list is in array format
__UpperCAmelCase =padded_inputs.get("""input_features""" ).transpose(2 , 0 , 1 )
__UpperCAmelCase =[self._np_extract_fbank_features(__SCREAMING_SNAKE_CASE ) for waveform in input_features[0]]
if isinstance(input_features[0] , __SCREAMING_SNAKE_CASE ):
__UpperCAmelCase =[np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features]
else:
__UpperCAmelCase =input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
__UpperCAmelCase =padded_inputs["""attention_mask"""][:, :: self.hop_length]
if return_tensors is not None:
__UpperCAmelCase =padded_inputs.convert_to_tensors(__SCREAMING_SNAKE_CASE )
return padded_inputs
def _a ( self : int ) -> Dict[str, Any]:
__UpperCAmelCase =copy.deepcopy(self.__dict__ )
__UpperCAmelCase =self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
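# Hedged usage sketch (upstream this class is WhisperFeatureExtractor,
# renamed `_A` here, hence the commented form). With the defaults above
# (80 mel bins, 16 kHz sampling, hop 160, 30 s chunks), a padded batch
# comes out as (batch, 80, 3000):
#   fe = WhisperFeatureExtractor()
#   feats = fe(np.zeros(16_000, dtype=np.float32), sampling_rate=16_000,
#              return_tensors="np")
#   feats["input_features"].shape  # -> (1, 80, 3000)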
| 68 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def lowercase__ ( A_: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__UpperCAmelCase =k.replace(A_ , A_ )
if k.startswith("""encoder""" ):
__UpperCAmelCase =k.replace(""".attn""" , """.self_attn""" )
__UpperCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" )
__UpperCAmelCase =k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
__UpperCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" )
__UpperCAmelCase =k.replace("""norm2""" , """encoder_attn_layer_norm""" )
__UpperCAmelCase =k.replace("""norm3""" , """final_layer_norm""" )
return k
def lowercase__ ( A_: Tuple ) -> str:
"""simple docstring"""
__UpperCAmelCase =[
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
__UpperCAmelCase =sd.pop(A_ )
__UpperCAmelCase =k.replace("""layernorm_embedding""" , """layer_norm""" )
assert new_k not in sd
__UpperCAmelCase =v
__A = ["START"]
@torch.no_grad()
def lowercase__ ( A_: List[Any] , A_: str , A_: int ) -> Optional[int]:
"""simple docstring"""
__UpperCAmelCase =torch.load(A_ , map_location="""cpu""" )
__UpperCAmelCase =model["""model"""]
__UpperCAmelCase =BlenderbotConfig.from_json_file(A_ )
__UpperCAmelCase =BlenderbotForConditionalGeneration(A_ )
__UpperCAmelCase =m.model.state_dict().keys()
__UpperCAmelCase =[]
__UpperCAmelCase ={}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__UpperCAmelCase =rename_state_dict_key(A_ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__UpperCAmelCase =v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(A_ )
m.model.load_state_dict(A_ , strict=A_ )
m.half()
m.save_pretrained(A_ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
__A = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
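# Illustrative invocation (the script file name is hypothetical; the argument
# values match the defaults and help text in the parser above):
#   python convert_blenderbot_original_checkpoint.py \
#       --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot \
#       --hf_config_json blenderbot-3b-config.json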
| 68 | 1 |
from collections.abc import Callable
import numpy as np
def lowercase__ ( A_: Callable , A_: float , A_: float , A_: float , A_: float ) -> np.array:
"""simple docstring"""
__UpperCAmelCase =int(np.ceil((x_end - xa) / step_size ) )
__UpperCAmelCase =np.zeros((n + 1,) )
__UpperCAmelCase =ya
__UpperCAmelCase =xa
for k in range(A_ ):
__UpperCAmelCase =y[k] + step_size * ode_func(A_ , y[k] )
__UpperCAmelCase =y[k] + (
(step_size / 2) * (ode_func(A_ , y[k] ) + ode_func(x + step_size , A_ ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
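# A self-contained sketch of one full Heun (explicit trapezoidal) run,
# mirroring the stepper above: integrate dy/dx = y with y(0) = 1 up to x = 1.
# The exact answer is e ~= 2.71828; with step size h = 0.01 the result is close.
x, y, h = 0.0, 1.0, 0.01
f = lambda t, u: u  # the ODE right-hand side
for _ in range(100):
    y_pred = y + h * f(x, y)  # Euler predictor
    y = y + (h / 2) * (f(x, y) + f(x + h, y_pred))  # trapezoidal corrector
    x += h
assert abs(y - 2.71828) < 1e-3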
| 68 |
from itertools import permutations
def lowercase__ ( A_: tuple ) -> bool:
"""simple docstring"""
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__UpperCAmelCase =[7, 11, 13, 17]
for i, test in enumerate(A_ ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowercase__ ( A_: int = 10 ) -> int:
"""simple docstring"""
return sum(
int("""""".join(map(A_ , A_ ) ) )
for num in permutations(range(A_ ) )
if is_substring_divisible(A_ ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 68 | 1 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
__A = TypeVar("T")
def lowercase__ ( A_: int ) -> int:
"""simple docstring"""
return (position - 1) // 2
def lowercase__ ( A_: int ) -> int:
"""simple docstring"""
return (2 * position) + 1
def lowercase__ ( A_: int ) -> int:
"""simple docstring"""
return (2 * position) + 2
class _A ( Generic[T] ):
"""simple docstring"""
def __init__( self : List[str] ) -> None:
__UpperCAmelCase =[]
__UpperCAmelCase ={}
__UpperCAmelCase =0
def __len__( self : str ) -> int:
return self.elements
def __repr__( self : Dict ) -> str:
return str(self.heap )
def _a ( self : Optional[int] ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
__UpperCAmelCase =self.elements
self.elements += 1
self._bubble_up(__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[int] ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
__UpperCAmelCase , __UpperCAmelCase =self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__UpperCAmelCase , __UpperCAmelCase =self.heap[0]
self._bubble_down(__SCREAMING_SNAKE_CASE )
return elem
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
# Update the weight of the given key
__UpperCAmelCase =self.position_map[elem]
__UpperCAmelCase =(elem, weight)
if position > 0:
__UpperCAmelCase =get_parent_position(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase =self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(__SCREAMING_SNAKE_CASE )
else:
self._bubble_down(__SCREAMING_SNAKE_CASE )
else:
self._bubble_down(__SCREAMING_SNAKE_CASE )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : T ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
__UpperCAmelCase =self.position_map[elem]
if curr_pos == 0:
return None
__UpperCAmelCase =get_parent_position(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase =self.heap[curr_pos]
__UpperCAmelCase , __UpperCAmelCase =self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_up(__SCREAMING_SNAKE_CASE )
return None
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
__UpperCAmelCase =self.position_map[elem]
__UpperCAmelCase , __UpperCAmelCase =self.heap[curr_pos]
__UpperCAmelCase =get_child_left_position(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =get_child_right_position(__SCREAMING_SNAKE_CASE )
if child_left_position < self.elements and child_right_position < self.elements:
__UpperCAmelCase , __UpperCAmelCase =self.heap[child_left_position]
__UpperCAmelCase , __UpperCAmelCase =self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_down(__SCREAMING_SNAKE_CASE )
if child_left_position < self.elements:
__UpperCAmelCase , __UpperCAmelCase =self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_down(__SCREAMING_SNAKE_CASE )
else:
return None
if child_right_position < self.elements:
__UpperCAmelCase , __UpperCAmelCase =self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_down(__SCREAMING_SNAKE_CASE )
return None
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> None:
# Swap the nodes at the given positions
__UpperCAmelCase =self.heap[nodea_pos][0]
__UpperCAmelCase =self.heap[nodea_pos][0]
__UpperCAmelCase , __UpperCAmelCase =(
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__UpperCAmelCase =nodea_pos
__UpperCAmelCase =nodea_pos
class _A ( Generic[T] ):
"""simple docstring"""
def __init__( self : List[Any] ) -> None:
__UpperCAmelCase ={}
__UpperCAmelCase =0
def __repr__( self : Tuple ) -> str:
return str(self.connections )
def __len__( self : str ) -> int:
return self.nodes
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : T ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
__UpperCAmelCase ={}
self.nodes += 1
def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(__SCREAMING_SNAKE_CASE )
self.add_node(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =weight
__UpperCAmelCase =weight
def lowercase__ ( A_: GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
"""simple docstring"""
__UpperCAmelCase ={node: maxsize for node in graph.connections}
__UpperCAmelCase ={node: None for node in graph.connections}
__UpperCAmelCase =MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(A_ , A_ )
if priority_queue.is_empty():
return dist, parent
# initialization
__UpperCAmelCase =priority_queue.extract_min()
__UpperCAmelCase =0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__UpperCAmelCase =dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(A_ , dist[neighbour] )
__UpperCAmelCase =node
# running prim's algorithm
while not priority_queue.is_empty():
__UpperCAmelCase =priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__UpperCAmelCase =dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(A_ , dist[neighbour] )
__UpperCAmelCase =node
return dist, parent
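# Hedged usage sketch (the graph and queue classes above were both renamed to
# `_A` and the entry point to `lowercase__` by the obfuscation; upstream they
# are GraphUndirectedWeighted, MinPriorityQueue and prims_algo — an
# assumption — hence the commented form):
#   graph = GraphUndirectedWeighted[int]()
#   graph.add_edge(1, 2, 3)
#   graph.add_edge(1, 3, 10)
#   graph.add_edge(2, 3, 2)
#   dist, parent = prims_algo(graph)
#   # parent == {1: None, 2: 1, 3: 2}: the MST keeps edges 1-2 (3) and 2-3 (2)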
| 68 | 1 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> List[Any]:
__UpperCAmelCase =jnp.ones((batch_size, length) ) / length
return scores
def _a ( self : str ) -> Any:
__UpperCAmelCase =None
__UpperCAmelCase =20
__UpperCAmelCase =self._get_uniform_logits(batch_size=2 , length=__SCREAMING_SNAKE_CASE )
# tweak scores to not be uniform anymore
__UpperCAmelCase =scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
__UpperCAmelCase =scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
__UpperCAmelCase =jax.nn.softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
__UpperCAmelCase =FlaxTemperatureLogitsWarper(temperature=0.5 )
__UpperCAmelCase =FlaxTemperatureLogitsWarper(temperature=1.3 )
__UpperCAmelCase =jax.nn.softmax(temp_dist_warper_sharper(__SCREAMING_SNAKE_CASE , scores.copy() , cur_len=__SCREAMING_SNAKE_CASE ) , axis=-1 )
__UpperCAmelCase =jax.nn.softmax(temp_dist_warper_smoother(__SCREAMING_SNAKE_CASE , scores.copy() , cur_len=__SCREAMING_SNAKE_CASE ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _a ( self : List[str] ) -> Any:
__UpperCAmelCase =None
__UpperCAmelCase =10
__UpperCAmelCase =2
# create ramp distribution
__UpperCAmelCase =np.broadcast_to(np.arange(__SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy()
__UpperCAmelCase =ramp_logits[1:, : vocab_size // 2] + vocab_size
__UpperCAmelCase =FlaxTopKLogitsWarper(3 )
__UpperCAmelCase =top_k_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
__UpperCAmelCase =5
__UpperCAmelCase =FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
__UpperCAmelCase =np.broadcast_to(np.arange(__SCREAMING_SNAKE_CASE )[None, :] , (batch_size, length) ).copy()
__UpperCAmelCase =top_k_warp_safety_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _a ( self : Optional[Any] ) -> Tuple:
__UpperCAmelCase =None
__UpperCAmelCase =10
__UpperCAmelCase =2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__UpperCAmelCase =np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
__UpperCAmelCase =FlaxTopPLogitsWarper(0.8 )
__UpperCAmelCase =np.exp(top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__UpperCAmelCase =np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# check edge cases with negative and extreme logits
__UpperCAmelCase =np.broadcast_to(np.arange(__SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__UpperCAmelCase =ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
__UpperCAmelCase =FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
__UpperCAmelCase =top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _a ( self : str ) -> Union[str, Any]:
__UpperCAmelCase =20
__UpperCAmelCase =4
__UpperCAmelCase =0
__UpperCAmelCase =FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__SCREAMING_SNAKE_CASE )
# check that min length is applied at length 5
__UpperCAmelCase =ids_tensor((batch_size, 20) , vocab_size=20 )
__UpperCAmelCase =5
__UpperCAmelCase =self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =min_dist_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
__UpperCAmelCase =self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =15
__UpperCAmelCase =min_dist_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(__SCREAMING_SNAKE_CASE ).any() )
def _a ( self : List[Any] ) -> Tuple:
__UpperCAmelCase =20
__UpperCAmelCase =4
__UpperCAmelCase =0
__UpperCAmelCase =FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__SCREAMING_SNAKE_CASE )
# check that all scores are -inf except the bos_token_id score
__UpperCAmelCase =ids_tensor((batch_size, 1) , vocab_size=20 )
__UpperCAmelCase =1
__UpperCAmelCase =self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
__UpperCAmelCase =3
__UpperCAmelCase =self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(__SCREAMING_SNAKE_CASE ).any() )
def _a ( self : List[str] ) -> Optional[Any]:
__UpperCAmelCase =20
__UpperCAmelCase =4
__UpperCAmelCase =0
__UpperCAmelCase =5
__UpperCAmelCase =FlaxForcedEOSTokenLogitsProcessor(max_length=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE )
# check that all scores are -inf except the eos_token_id when max_length is reached
__UpperCAmelCase =ids_tensor((batch_size, 4) , vocab_size=20 )
__UpperCAmelCase =4
__UpperCAmelCase =self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
__UpperCAmelCase =3
__UpperCAmelCase =self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(__SCREAMING_SNAKE_CASE ).any() )
def _a ( self : int ) -> int:
__UpperCAmelCase =4
__UpperCAmelCase =10
__UpperCAmelCase =15
__UpperCAmelCase =2
__UpperCAmelCase =1
__UpperCAmelCase =15
# dummy input_ids and scores
__UpperCAmelCase =ids_tensor((batch_size, sequence_length) , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =input_ids.copy()
__UpperCAmelCase =self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =scores.copy()
# instantiate all dist processors
__UpperCAmelCase =FlaxTemperatureLogitsWarper(temperature=0.5 )
__UpperCAmelCase =FlaxTopKLogitsWarper(3 )
__UpperCAmelCase =FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__UpperCAmelCase =FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =FlaxForcedEOSTokenLogitsProcessor(max_length=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =10
# no processor list
__UpperCAmelCase =temp_dist_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =top_k_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =min_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =bos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =eos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
# with processor list
__UpperCAmelCase =FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__UpperCAmelCase =processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
# scores should be equal
self.assertTrue(jnp.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _a ( self : Optional[int] ) -> int:
__UpperCAmelCase =4
__UpperCAmelCase =10
__UpperCAmelCase =15
__UpperCAmelCase =2
__UpperCAmelCase =1
__UpperCAmelCase =15
# dummy input_ids and scores
__UpperCAmelCase =ids_tensor((batch_size, sequence_length) , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =input_ids.copy()
__UpperCAmelCase =self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =scores.copy()
# instantiate all dist processors
__UpperCAmelCase =FlaxTemperatureLogitsWarper(temperature=0.5 )
__UpperCAmelCase =FlaxTopKLogitsWarper(3 )
__UpperCAmelCase =FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__UpperCAmelCase =FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =FlaxForcedEOSTokenLogitsProcessor(max_length=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =10
# no processor list
def run_no_processor_list(__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict ):
__UpperCAmelCase =temp_dist_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =top_k_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =min_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =bos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =eos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
return scores
# with processor list
def run_processor_list(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int ):
__UpperCAmelCase =FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__UpperCAmelCase =processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
return scores
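        # jit-compile both variants to verify the processor list still matches the individual calls under XLA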
__UpperCAmelCase =jax.jit(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jax.jit(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jitted_run_no_processor_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jitted_run_processor_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# scores should be equal
self.assertTrue(jnp.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 68 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
__A = logging.get_logger(__name__)
@dataclass
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
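    # Convert deprecated "no_*" arguments into their positive counterparts and warn the caller.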
    def __init__( self : Any , **kwargs : Union[str, Any] ) -> Dict:
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
                logger.warning(
                    f'''{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'''
                    f''' {positive_arg}={kwargs[positive_arg]}''' )
        self.tpu_name = kwargs.pop("""tpu_name""" , self.tpu_name )
        self.device_idx = kwargs.pop("""device_idx""" , self.device_idx )
        self.eager_mode = kwargs.pop("""eager_mode""" , self.eager_mode )
        self.use_xla = kwargs.pop("""use_xla""" , self.use_xla )
        super().__init__(**kwargs )
lowerCamelCase : str = field(
default=UpperCamelCase , metadata={'help': 'Name of TPU'} , )
lowerCamelCase : int = field(
default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , )
lowerCamelCase : bool = field(default=UpperCamelCase , metadata={'help': 'Benchmark models in eager model.'} )
lowerCamelCase : bool = field(
default=UpperCamelCase , metadata={
'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'
} , )
@cached_property
def _a ( self : List[str] ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ["""tf"""] )
__UpperCAmelCase =None
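        # Try to resolve a TPU cluster (by name if one was given); fall back to None when no TPU is reachable.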
if self.tpu:
try:
if self.tpu_name:
__UpperCAmelCase =tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
__UpperCAmelCase =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
__UpperCAmelCase =None
return tpu
@cached_property
def _a ( self : Tuple ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ["""tf"""] )
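        # Pick a distribution strategy: TPU when available, otherwise a single GPU or the CPU.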
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
__UpperCAmelCase =tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
__UpperCAmelCase =tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' )
else:
tf.config.set_visible_devices([] , """GPU""" ) # disable GPU
__UpperCAmelCase =tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' )
return strategy
@property
def _a ( self : Optional[Any] ) -> bool:
requires_backends(self , ["""tf"""] )
return self._setup_tpu is not None
@property
def _a ( self : str ) -> "tf.distribute.Strategy":
requires_backends(self , ["""tf"""] )
return self._setup_strategy
@property
def _a ( self : Dict ) -> Optional[int]:
requires_backends(self , ["""tf"""] )
return tf.config.list_physical_devices("""GPU""" )
@property
def _a ( self : List[str] ) -> int:
requires_backends(self , ["""tf"""] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def _a ( self : List[str] ) -> bool:
return self.n_gpu > 0
| 68 | 1 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
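# Minimal KwargsHandler subclass used to check that to_kwargs() only exports non-default fields.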
@dataclass
class MockClass( KwargsHandler ):
"""simple docstring"""
lowerCamelCase : int = 0
lowerCamelCase : bool = False
lowerCamelCase : float = 3.0
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Tuple ) -> Dict:
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"""a""": 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {"""a""": 2, """b""": True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"""a""": 2, """c""": 2.25} )
@require_cuda
def _a ( self : str ) -> Optional[Any]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="""fp16""" , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale , 1_024.0 )
        self.assertEqual(scaler._growth_factor , 2.0 )
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor , 0.5 )
        self.assertEqual(scaler._growth_interval , 2000 )
        self.assertEqual(scaler._enabled , True )
@require_multi_gpu
def _a ( self : Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase =["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=os.environ.copy() )
if __name__ == "__main__":
__A = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
__A = Accelerator(kwargs_handlers=[ddp_scaler])
__A = torch.nn.Linear(1_00, 2_00)
__A = accelerator.prepare(model)
# Check the values changed in kwargs
__A = ""
__A = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 68 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _A ( unittest.TestCase ):
"""simple docstring"""
@property
def _a ( self : List[str] ) -> Dict:
torch.manual_seed(0 )
        model = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def _a ( self : int ) -> Union[str, Any]:
__UpperCAmelCase =self.dummy_uncond_unet
__UpperCAmelCase =ScoreSdeVeScheduler()
__UpperCAmelCase =ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
sde_ve.to(__SCREAMING_SNAKE_CASE )
sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =torch.manual_seed(0 )
__UpperCAmelCase =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =torch.manual_seed(0 )
__UpperCAmelCase =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )[
0
]
__UpperCAmelCase =image[0, -3:, -3:, -1]
__UpperCAmelCase =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[int] ) -> int:
__UpperCAmelCase ="""google/ncsnpp-church-256"""
        __UpperCAmelCase =UNet2DModel.from_pretrained(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =ScoreSdeVeScheduler.from_pretrained(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
sde_ve.to(__SCREAMING_SNAKE_CASE )
sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =torch.manual_seed(0 )
__UpperCAmelCase =sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__UpperCAmelCase =np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 68 | 1 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class _A ( UpperCamelCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : pyspark.sql.DataFrame , __SCREAMING_SNAKE_CASE : Optional[NamedSplit] = None , __SCREAMING_SNAKE_CASE : Optional[Features] = None , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : str = "arrow" , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> Optional[int]:
super().__init__(
split=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__UpperCAmelCase =load_from_cache_file
__UpperCAmelCase =file_format
__UpperCAmelCase =Spark(
df=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , working_dir=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def _a ( self : Any ) -> Tuple:
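        # When streaming, read the Spark DataFrame lazily; otherwise build (or reuse) the cached dataset first.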
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
__UpperCAmelCase =None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__SCREAMING_SNAKE_CASE , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 68 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__A = logging.get_logger(__name__)
__A = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer( Trainer ):
"""simple docstring"""
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=None , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if config is None:
assert isinstance(self.model , __SCREAMING_SNAKE_CASE ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f''' {self.model.__class__}'''
)
__UpperCAmelCase =self.model.config
else:
__UpperCAmelCase =config
__UpperCAmelCase =data_args
__UpperCAmelCase =self.config.tgt_vocab_size if isinstance(self.config , __SCREAMING_SNAKE_CASE ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
__UpperCAmelCase =torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
__UpperCAmelCase =label_smoothed_nll_loss
def _a ( self : Any , __SCREAMING_SNAKE_CASE : int ) -> Any:
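        # Lazily build the optimizer and LR scheduler, excluding biases and LayerNorm weights from weight decay.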
if self.optimizer is None:
__UpperCAmelCase =["""bias""", """LayerNorm.weight"""]
__UpperCAmelCase =[
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
__UpperCAmelCase =Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
__UpperCAmelCase =Adafactor
__UpperCAmelCase ={"""scale_parameter""": False, """relative_step""": False}
else:
__UpperCAmelCase =AdamW
__UpperCAmelCase ={
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
__UpperCAmelCase =self.args.learning_rate
if self.sharded_ddp:
__UpperCAmelCase =OSS(
params=__SCREAMING_SNAKE_CASE , optim=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
else:
__UpperCAmelCase =optimizer_cls(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if self.lr_scheduler is None:
__UpperCAmelCase =self._get_lr_scheduler(__SCREAMING_SNAKE_CASE )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
__UpperCAmelCase =arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
__UpperCAmelCase =schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
__UpperCAmelCase =schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
__UpperCAmelCase =schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__SCREAMING_SNAKE_CASE )
return scheduler
def _a ( self : Optional[Any] ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
__UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[0]
__UpperCAmelCase =self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
__UpperCAmelCase , __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[:2]
else:
# compute label smoothed loss
__UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[0]
__UpperCAmelCase =torch.nn.functional.log_softmax(__SCREAMING_SNAKE_CASE , dim=-1 )
__UpperCAmelCase , __UpperCAmelCase =self.loss_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
__UpperCAmelCase =inputs.pop("""labels""" )
__UpperCAmelCase , __UpperCAmelCase =self._compute_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return loss
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : nn.Module , __SCREAMING_SNAKE_CASE : Dict[str, Union[torch.Tensor, Any]] , __SCREAMING_SNAKE_CASE : bool , __SCREAMING_SNAKE_CASE : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
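        # Evaluation step: optionally generate sequences, then pad outputs so tensors can be gathered across devices.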
__UpperCAmelCase =self._prepare_inputs(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ={
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
__UpperCAmelCase =self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__SCREAMING_SNAKE_CASE , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
__UpperCAmelCase =self._pad_tensors_to_max_len(__SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] )
__UpperCAmelCase =inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
__UpperCAmelCase , __UpperCAmelCase =self._compute_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
__UpperCAmelCase =generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
__UpperCAmelCase =self._pad_tensors_to_max_len(__SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int ) -> List[Any]:
# If PAD token is not defined at least EOS token has to be defined
__UpperCAmelCase =self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
f''' padded to `max_length`={max_length}''' )
__UpperCAmelCase =pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
__UpperCAmelCase =tensor
return padded_tensor
| 68 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 68 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _A ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : List[Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str=0 ) -> Any:
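        # Reproducible dummy image and RNG state shared by all the scheduler variants tested below.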
__UpperCAmelCase =floats_tensor((1, 3, 128, 128) , rng=random.Random(__SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =np.random.RandomState(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ={
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""strength""": 0.75,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self : Optional[Any] ) -> int:
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _a ( self : Union[str, Any] ) -> Union[str, Any]:
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : Optional[Any] ) -> Dict:
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
# warmup pass to apply optimizations
__UpperCAmelCase =pipe(**self.get_dummy_inputs() )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : List[Any] ) -> List[str]:
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : Union[str, Any] ) -> Optional[Any]:
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : Union[str, Any] ) -> Dict:
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _A ( unittest.TestCase ):
"""simple docstring"""
@property
def _a ( self : List[str] ) -> Optional[int]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _a ( self : Dict ) -> int:
__UpperCAmelCase =ort.SessionOptions()
__UpperCAmelCase =False
return options
def _a ( self : Dict ) -> Any:
__UpperCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__UpperCAmelCase =init_image.resize((768, 512) )
# using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ="""A fantasy landscape, trending on artstation"""
__UpperCAmelCase =np.random.RandomState(0 )
__UpperCAmelCase =pipe(
prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , )
__UpperCAmelCase =output.images
__UpperCAmelCase =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__UpperCAmelCase =np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _a ( self : List[str] ) -> str:
__UpperCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__UpperCAmelCase =init_image.resize((768, 512) )
__UpperCAmelCase =LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ="""A fantasy landscape, trending on artstation"""
__UpperCAmelCase =np.random.RandomState(0 )
__UpperCAmelCase =pipe(
prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , )
__UpperCAmelCase =output.images
__UpperCAmelCase =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__UpperCAmelCase =np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 68 | 1 |
def lowercase__ ( txt: str ) -> list:
    """simple docstring"""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt ) )
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("doctest").testmod()
| 68 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__A = logging.getLogger(__name__)
class GLUETransformer( BaseTransformer ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = 'sequence-classification'
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Tuple ) -> Optional[Any]:
if type(__SCREAMING_SNAKE_CASE ) == dict:
__UpperCAmelCase =Namespace(**__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =glue_output_modes[hparams.task]
__UpperCAmelCase =glue_tasks_num_labels[hparams.task]
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.mode )
def _a ( self : str , **__SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
return self.model(**__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict ) -> List[Any]:
__UpperCAmelCase ={"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__UpperCAmelCase =batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
__UpperCAmelCase =self(**__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =outputs[0]
__UpperCAmelCase =self.trainer.lr_schedulers[0]["""scheduler"""]
__UpperCAmelCase ={"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _a ( self : Tuple ) -> List[Any]:
__UpperCAmelCase =self.hparams
__UpperCAmelCase =processors[args.task]()
__UpperCAmelCase =processor.get_labels()
for mode in ["train", "dev"]:
__UpperCAmelCase =self._feature_file(__SCREAMING_SNAKE_CASE )
if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , __SCREAMING_SNAKE_CASE )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
__UpperCAmelCase =(
processor.get_dev_examples(args.data_dir )
if mode == """dev"""
else processor.get_train_examples(args.data_dir )
)
__UpperCAmelCase =convert_examples_to_features(
__SCREAMING_SNAKE_CASE , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("""Saving features into cached file %s""" , __SCREAMING_SNAKE_CASE )
torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : bool = False ) -> DataLoader:
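        # The "test" split reuses the cached "dev" feature file; features are always served from the cache here.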
__UpperCAmelCase ="""dev""" if mode == """test""" else mode
__UpperCAmelCase =self._feature_file(__SCREAMING_SNAKE_CASE )
logger.info("""Loading features from cached file %s""" , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =torch.load(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =torch.tensor([f.input_ids for f in features] , dtype=torch.long )
__UpperCAmelCase =torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
__UpperCAmelCase =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
__UpperCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
__UpperCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , batch_size=__SCREAMING_SNAKE_CASE , shuffle=__SCREAMING_SNAKE_CASE , )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int ) -> str:
__UpperCAmelCase ={"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__UpperCAmelCase =batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
__UpperCAmelCase =self(**__SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase =outputs[:2]
__UpperCAmelCase =logits.detach().cpu().numpy()
__UpperCAmelCase =inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Any ) -> tuple:
__UpperCAmelCase =torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
__UpperCAmelCase =np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
__UpperCAmelCase =np.argmax(__SCREAMING_SNAKE_CASE , axis=1 )
elif self.hparams.glue_output_mode == "regression":
__UpperCAmelCase =np.squeeze(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =np.concatenate([x["""target"""] for x in outputs] , axis=0 )
__UpperCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
__UpperCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
__UpperCAmelCase ={**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}
__UpperCAmelCase =dict(results.items() )
__UpperCAmelCase =results
return ret, preds_list, out_label_list
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : list ) -> dict:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._eval_end(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> dict:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._eval_end(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _a ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]:
BaseTransformer.add_model_specific_args(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=__SCREAMING_SNAKE_CASE , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--task""" , default="""""" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The GLUE task to run""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=__SCREAMING_SNAKE_CASE , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd() )
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            """./results""" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
        os.makedirs(args.output_dir )
    model = GLUETransformer(args )
    trainer = generic_train(model , args )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=True ) )
        model = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(model )
if __name__ == "__main__":
main()
| 68 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str=7 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : Tuple=18 , __SCREAMING_SNAKE_CASE : Tuple=30 , __SCREAMING_SNAKE_CASE : Union[str, Any]=400 , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __SCREAMING_SNAKE_CASE : int=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __SCREAMING_SNAKE_CASE : Dict=True , ) -> Optional[Any]:
__UpperCAmelCase =size if size is not None else {"""height""": 224, """width""": 224}
__UpperCAmelCase =crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
__UpperCAmelCase =parent
__UpperCAmelCase =batch_size
__UpperCAmelCase =num_channels
__UpperCAmelCase =image_size
__UpperCAmelCase =min_resolution
__UpperCAmelCase =max_resolution
__UpperCAmelCase =do_resize
__UpperCAmelCase =size
__UpperCAmelCase =do_center_crop
__UpperCAmelCase =crop_size
__UpperCAmelCase =do_normalize
__UpperCAmelCase =image_mean
__UpperCAmelCase =image_std
__UpperCAmelCase =do_convert_rgb
def _a ( self : Any ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def _a ( self : str , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : int=False ) -> List[Any]:
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
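        # Either produce images of one fixed size or sample random resolutions within the configured bounds.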
if equal_resolution:
__UpperCAmelCase =[]
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
                        255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uint8 ) )
else:
__UpperCAmelCase =[]
for i in range(self.batch_size ):
__UpperCAmelCase , __UpperCAmelCase =np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
                image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uint8 ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
__UpperCAmelCase =[Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
if torchify:
__UpperCAmelCase =[torch.from_numpy(__SCREAMING_SNAKE_CASE ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class _A ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Dict = ChineseCLIPImageProcessor if is_vision_available() else None
def _a ( self : List[Any] ) -> List[Any]:
__UpperCAmelCase =ChineseCLIPImageProcessingTester(self , do_center_crop=__SCREAMING_SNAKE_CASE )
@property
def _a ( self : Optional[int] ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """size""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_center_crop""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """center_crop""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_normalize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_mean""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_std""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_convert_rgb""" ) )
def _a ( self : Optional[int] ) -> str:
__UpperCAmelCase =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 224, """width""": 224} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
__UpperCAmelCase =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def _a ( self : Optional[int] ) -> Optional[int]:
pass
def _a ( self : Optional[int] ) -> Optional[int]:
# Initialize image_processing
__UpperCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCAmelCase =self.image_processor_tester.prepare_inputs(equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__UpperCAmelCase =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__UpperCAmelCase =image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a ( self : Tuple ) -> Union[str, Any]:
# Initialize image_processing
__UpperCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCAmelCase =self.image_processor_tester.prepare_inputs(equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__UpperCAmelCase =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__UpperCAmelCase =image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a ( self : Tuple ) -> Any:
# Initialize image_processing
__UpperCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCAmelCase =self.image_processor_tester.prepare_inputs(equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__UpperCAmelCase =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__UpperCAmelCase =image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
@require_torch
@require_vision
class _A ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = ChineseCLIPImageProcessor if is_vision_available() else None
def _a ( self : Tuple ) -> Union[str, Any]:
__UpperCAmelCase =ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =3
@property
def _a ( self : Optional[Any] ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self : Dict ) -> Optional[int]:
__UpperCAmelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """size""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_center_crop""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """center_crop""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_normalize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_mean""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_std""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_convert_rgb""" ) )
def _a ( self : List[Any] ) -> Tuple:
pass
def _a ( self : List[Any] ) -> Dict:
# Initialize image_processing
__UpperCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCAmelCase =self.image_processor_tester.prepare_inputs(equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__UpperCAmelCase =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__UpperCAmelCase =image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 68 |
def xnor_gate(input_a: int , input_b: int ) -> int:
    """simple docstring"""
    return 1 if input_a == input_b else 0
def test_xnor_gate() -> None:
    """simple docstring"""
    assert xnor_gate(0 , 0 ) == 1
    assert xnor_gate(0 , 1 ) == 0
    assert xnor_gate(1 , 0 ) == 0
    assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
| 68 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Any = 'xmod'
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : List[Any]=30522 , __SCREAMING_SNAKE_CASE : List[Any]=768 , __SCREAMING_SNAKE_CASE : List[Any]=12 , __SCREAMING_SNAKE_CASE : Optional[int]=12 , __SCREAMING_SNAKE_CASE : Tuple=3072 , __SCREAMING_SNAKE_CASE : Optional[int]="gelu" , __SCREAMING_SNAKE_CASE : str=0.1 , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : Dict=512 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Tuple=0.02 , __SCREAMING_SNAKE_CASE : Optional[Any]=1e-12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1 , __SCREAMING_SNAKE_CASE : Tuple=0 , __SCREAMING_SNAKE_CASE : Optional[Any]=2 , __SCREAMING_SNAKE_CASE : Any="absolute" , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Any=("en_XX",) , __SCREAMING_SNAKE_CASE : List[str]=None , **__SCREAMING_SNAKE_CASE : Dict , ) -> Optional[Any]:
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =vocab_size
__UpperCAmelCase =hidden_size
__UpperCAmelCase =num_hidden_layers
__UpperCAmelCase =num_attention_heads
__UpperCAmelCase =hidden_act
__UpperCAmelCase =intermediate_size
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =max_position_embeddings
__UpperCAmelCase =type_vocab_size
__UpperCAmelCase =initializer_range
__UpperCAmelCase =layer_norm_eps
__UpperCAmelCase =position_embedding_type
__UpperCAmelCase =use_cache
__UpperCAmelCase =classifier_dropout
__UpperCAmelCase =pre_norm
__UpperCAmelCase =adapter_reduction_factor
__UpperCAmelCase =adapter_layer_norm
__UpperCAmelCase =adapter_reuse_layer_norm
__UpperCAmelCase =ln_before_adapter
__UpperCAmelCase =list(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =default_language
class _A ( UpperCamelCase ):
"""simple docstring"""
@property
def _a ( self : int ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__UpperCAmelCase ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__UpperCAmelCase ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 68 |
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> int:
    """simple docstring"""
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right(sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> int:
    """simple docstring"""
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left(sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> None:
    """simple docstring"""
    sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )
def insort_right(sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> None:
    """simple docstring"""
    sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )
def binary_search(sorted_collection: list[int] , item: int ) -> int | None:
    """simple docstring"""
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib(sorted_collection: list[int] , item: int ) -> int | None:
    """simple docstring"""
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection: list[int] , item: int , left: int , right: int ) -> int | None:
    """simple docstring"""
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
    else:
        return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(F"""{target} was not found in {collection}.""")
    else:
        print(F"""{target} was found at position {result} in {collection}.""")
| 68 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def lowercase__ ( A_: int ) -> Optional[Any]:
"""simple docstring"""
if "resnet-50" in model_name:
__UpperCAmelCase =ResNetConfig.from_pretrained("""microsoft/resnet-50""" )
elif "resnet-101" in model_name:
__UpperCAmelCase =ResNetConfig.from_pretrained("""microsoft/resnet-101""" )
else:
raise ValueError("""Model name should include either resnet50 or resnet101""" )
__UpperCAmelCase =DetrConfig(use_timm_backbone=A_ , backbone_config=A_ )
# set label attributes
__UpperCAmelCase ="""panoptic""" in model_name
if is_panoptic:
__UpperCAmelCase =250
else:
__UpperCAmelCase =91
__UpperCAmelCase ="""huggingface/label-files"""
__UpperCAmelCase ="""coco-detection-id2label.json"""
__UpperCAmelCase =json.load(open(hf_hub_download(A_ , A_ , repo_type="""dataset""" ) , """r""" ) )
__UpperCAmelCase ={int(k ): v for k, v in idalabel.items()}
__UpperCAmelCase =idalabel
__UpperCAmelCase ={v: k for k, v in idalabel.items()}
return config, is_panoptic
def lowercase__ ( A_: int ) -> Tuple:
"""simple docstring"""
__UpperCAmelCase =[]
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") )
rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") )
rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") )
rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") )
rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
] )
return rename_keys
def lowercase__ ( A_: Dict , A_: str , A_: str ) -> Tuple:
"""simple docstring"""
__UpperCAmelCase =state_dict.pop(A_ )
__UpperCAmelCase =val
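# The helper above is the standard pop-and-reassign rename: the tensor is
# removed under the original checkpoint key and stored under the HuggingFace
# key. A tiny sketch of the pattern (dict and keys here are hypothetical,
# not real checkpoint entries):
def rename_key_sketch(state_dict: dict, old: str, new: str) -> None:
    state_dict[new] = state_dict.pop(old)

sd = {"old.conv.weight": 1}
rename_key_sketch(sd, "old.conv.weight", "new.convolution.weight")
assert "old.conv.weight" not in sd and sd["new.convolution.weight"] == 1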
def lowercase__ ( A_: Union[str, Any] , A_: List[str]=False ) -> Optional[int]:
"""simple docstring"""
__UpperCAmelCase =""""""
if is_panoptic:
__UpperCAmelCase ="""detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__UpperCAmelCase =state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
__UpperCAmelCase =state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
__UpperCAmelCase =in_proj_weight[:256, :]
__UpperCAmelCase =in_proj_bias[:256]
__UpperCAmelCase =in_proj_weight[256:512, :]
__UpperCAmelCase =in_proj_bias[256:512]
__UpperCAmelCase =in_proj_weight[-256:, :]
__UpperCAmelCase =in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
__UpperCAmelCase =state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
__UpperCAmelCase =state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
__UpperCAmelCase =in_proj_weight[:256, :]
__UpperCAmelCase =in_proj_bias[:256]
__UpperCAmelCase =in_proj_weight[256:512, :]
__UpperCAmelCase =in_proj_bias[256:512]
__UpperCAmelCase =in_proj_weight[-256:, :]
__UpperCAmelCase =in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
__UpperCAmelCase =state_dict.pop(
F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
__UpperCAmelCase =state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
__UpperCAmelCase =in_proj_weight_cross_attn[:256, :]
__UpperCAmelCase =in_proj_bias_cross_attn[:256]
__UpperCAmelCase =in_proj_weight_cross_attn[256:512, :]
__UpperCAmelCase =in_proj_bias_cross_attn[256:512]
__UpperCAmelCase =in_proj_weight_cross_attn[-256:, :]
__UpperCAmelCase =in_proj_bias_cross_attn[-256:]
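# The function above unpacks PyTorch's fused attention projection:
# nn.MultiheadAttention stores a single (3*d, d) in_proj_weight, while the
# HuggingFace DETR port expects separate query/key/value matrices, so the rows
# are sliced in thirds. A minimal sketch of that slicing, assuming d = 256 as
# in the loops above:
import torch

d = 256
in_proj_weight = torch.randn(3 * d, d)
in_proj_bias = torch.randn(3 * d)
q_w, k_w, v_w = in_proj_weight[:d, :], in_proj_weight[d : 2 * d, :], in_proj_weight[-d:, :]
q_b, k_b, v_b = in_proj_bias[:d], in_proj_bias[d : 2 * d], in_proj_bias[-d:]
assert q_w.shape == k_w.shape == v_w.shape == (d, d)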
def lowercase__ ( ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
__UpperCAmelCase =Image.open(requests.get(A_ , stream=A_ ).raw )
return im
@torch.no_grad()
def lowercase__ ( A_: List[str] , A_: Dict=None , A_: Optional[int]=False ) -> int:
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase =get_detr_config(A_ )
# load original model from torch hub
__UpperCAmelCase ={
"""detr-resnet-50""": """detr_resnet50""",
"""detr-resnet-101""": """detr_resnet101""",
}
logger.info(F'''Converting model {model_name}...''' )
__UpperCAmelCase =torch.hub.load("""facebookresearch/detr""" , model_name_to_original_name[model_name] , pretrained=A_ ).eval()
__UpperCAmelCase =detr.state_dict()
# rename keys
for src, dest in create_rename_keys(A_ ):
if is_panoptic:
__UpperCAmelCase ="""detr.""" + src
rename_key(A_ , A_ , A_ )
# query, key and value matrices need special treatment
read_in_q_k_v(A_ , is_panoptic=A_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__UpperCAmelCase ="""detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
__UpperCAmelCase =state_dict.pop(A_ )
__UpperCAmelCase =val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__UpperCAmelCase =state_dict.pop(A_ )
__UpperCAmelCase =val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
__UpperCAmelCase =state_dict.pop(A_ )
__UpperCAmelCase =val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
__UpperCAmelCase =state_dict.pop(A_ )
__UpperCAmelCase =val
# finally, create HuggingFace model and load state dict
__UpperCAmelCase =DetrForSegmentation(A_ ) if is_panoptic else DetrForObjectDetection(A_ )
model.load_state_dict(A_ )
model.eval()
# verify our conversion on an image
__UpperCAmelCase ="""coco_panoptic""" if is_panoptic else """coco_detection"""
__UpperCAmelCase =DetrImageProcessor(format=A_ )
__UpperCAmelCase =processor(images=prepare_img() , return_tensors="""pt""" )
__UpperCAmelCase =encoding["""pixel_values"""]
__UpperCAmelCase =detr(A_ )
__UpperCAmelCase =model(A_ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1e-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(A_ ).mkdir(exist_ok=A_ )
model.save_pretrained(A_ )
processor.save_pretrained(A_ )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("""Uploading PyTorch model and image processor to the hub...""" )
model.push_to_hub(F'''nielsr/{model_name}''' )
processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
__A = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 68 |
from typing import List
from .keymap import KEYMAP, get_character
def lowercase__ ( A_: str ) -> str:
"""simple docstring"""
def decorator(A_: int ):
__UpperCAmelCase =getattr(A_ , """handle_key""" , [] )
handle += [key]
setattr(A_ , """handle_key""" , A_ )
return func
return decorator
def lowercase__ ( *A_: List[str] ) -> Optional[int]:
"""simple docstring"""
def decorator(A_: Tuple ):
__UpperCAmelCase =getattr(A_ , """handle_key""" , [] )
handle += keys
setattr(A_ , """handle_key""" , A_ )
return func
return decorator
class _A ( UpperCamelCase ):
"""simple docstring"""
def __new__( cls : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> int:
__UpperCAmelCase =super().__new__(cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if not hasattr(__SCREAMING_SNAKE_CASE , """key_handler""" ):
setattr(__SCREAMING_SNAKE_CASE , """key_handler""" , {} )
setattr(__SCREAMING_SNAKE_CASE , """handle_input""" , KeyHandler.handle_input )
for value in attrs.values():
__UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , """handle_key""" , [] )
for key in handled_keys:
__UpperCAmelCase =value
return new_cls
@staticmethod
def _a ( cls : Dict ) -> List[Any]:
__UpperCAmelCase =get_character()
if char != KEYMAP["undefined"]:
__UpperCAmelCase =ord(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =cls.key_handler.get(__SCREAMING_SNAKE_CASE )
if handler:
__UpperCAmelCase =char
return handler(cls )
else:
return None
def lowercase__ ( cls: str ) -> int:
"""simple docstring"""
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
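# The decorators above tag methods with a `handle_key` attribute, and the
# metaclass sweeps the class namespace to build a key -> handler map. A
# self-contained sketch of that registration pattern (`on_key`, the metaclass
# name, and `Menu` are illustrative, not part of this module):
def on_key(key):
    def decorator(func):
        func.handle_key = getattr(func, "handle_key", []) + [key]
        return func
    return decorator

class RegisteringMeta(type):
    def __new__(mcls, name, bases, attrs):
        cls = super().__new__(mcls, name, bases, attrs)
        cls.key_handler = {}
        for value in attrs.values():
            for key in getattr(value, "handle_key", []):
                cls.key_handler[key] = value
        return cls

class Menu(metaclass=RegisteringMeta):
    @on_key("q")
    def quit(self):
        return "bye"

assert Menu.key_handler["q"](Menu()) == "bye"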
| 68 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__A = {
"configuration_bridgetower": [
"BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BridgeTowerConfig",
"BridgeTowerTextConfig",
"BridgeTowerVisionConfig",
],
"processing_bridgetower": ["BridgeTowerProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
"BridgeTowerForContrastiveLearning",
"BridgeTowerForImageAndTextRetrieval",
"BridgeTowerForMaskedLM",
"BridgeTowerModel",
"BridgeTowerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure)
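# The _LazyModule indirection above defers the heavy torch/vision imports
# until an attribute is first accessed. A rough, simplified sketch of the
# idea (illustrative only, not the actual transformers implementation):
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        # Resolve the attribute from its submodule on first access.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(attr)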
| 68 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 68 | 1 |
from __future__ import annotations
__A = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def lowercase__ ( A_: list[list[int]] , A_: list[int] , A_: list[int] , A_: int , A_: list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
"""simple docstring"""
__UpperCAmelCase =[
[0 for col in range(len(grid[0] ) )] for row in range(len(A_ ) )
] # the reference grid
__UpperCAmelCase =1
__UpperCAmelCase =[
[0 for col in range(len(grid[0] ) )] for row in range(len(A_ ) )
] # the action grid
__UpperCAmelCase =init[0]
__UpperCAmelCase =init[1]
__UpperCAmelCase =0
__UpperCAmelCase =g + heuristic[x][y] # cost from starting cell to destination cell
__UpperCAmelCase =[[f, g, x, y]]
__UpperCAmelCase =False # flag that is set when search is complete
__UpperCAmelCase =False # flag set if we can't find expand
while not found and not resign:
if len(A_ ) == 0:
raise ValueError("""Algorithm is unable to find solution""" )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__UpperCAmelCase =cell.pop()
__UpperCAmelCase =next_cell[2]
__UpperCAmelCase =next_cell[3]
__UpperCAmelCase =next_cell[1]
if x == goal[0] and y == goal[1]:
__UpperCAmelCase =True
else:
for i in range(len(A_ ) ): # to try out different valid actions
__UpperCAmelCase =x + DIRECTIONS[i][0]
__UpperCAmelCase =y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(A_ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__UpperCAmelCase =g + cost
__UpperCAmelCase =ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__UpperCAmelCase =1
__UpperCAmelCase =i
__UpperCAmelCase =[]
__UpperCAmelCase =goal[0]
__UpperCAmelCase =goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__UpperCAmelCase =x - DIRECTIONS[action[x][y]][0]
__UpperCAmelCase =y - DIRECTIONS[action[x][y]][1]
__UpperCAmelCase =xa
__UpperCAmelCase =ya
invpath.append([x, y] )
__UpperCAmelCase =[]
for i in range(len(A_ ) ):
path.append(invpath[len(A_ ) - 1 - i] )
return path, action
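# The __main__ block below builds a Manhattan-distance heuristic toward the
# goal, plus an extra penalty on obstacle cells. The heuristic in isolation,
# as a quick sketch on a 3x3 grid (goal coordinates illustrative):
goal_rc = (2, 2)
h = [[abs(i - goal_rc[0]) + abs(j - goal_rc[1]) for j in range(3)] for i in range(3)]
assert h[0][0] == 4 and h[2][2] == 0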
if __name__ == "__main__":
__A = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__A = [0, 0]
# all coordinates are given in format [y,x]
__A = [len(grid) - 1, len(grid[0]) - 1]
__A = 1
# the cost map which pushes the path closer to the goal
__A = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__A = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__A = 99
__A , __A = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 68 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class _A ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[Any]=99 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : int=5 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : str=37 , __SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : str=512 , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : List[str]=4 , ) -> Optional[Any]:
__UpperCAmelCase =parent
__UpperCAmelCase =batch_size
__UpperCAmelCase =seq_length
__UpperCAmelCase =is_training
__UpperCAmelCase =use_attention_mask
__UpperCAmelCase =use_token_type_ids
__UpperCAmelCase =use_labels
__UpperCAmelCase =vocab_size
__UpperCAmelCase =hidden_size
__UpperCAmelCase =num_hidden_layers
__UpperCAmelCase =num_attention_heads
__UpperCAmelCase =intermediate_size
__UpperCAmelCase =hidden_act
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =max_position_embeddings
__UpperCAmelCase =type_vocab_size
__UpperCAmelCase =type_sequence_label_size
__UpperCAmelCase =initializer_range
__UpperCAmelCase =num_choices
def _a ( self : List[Any] ) -> List[str]:
__UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase =None
if self.use_attention_mask:
__UpperCAmelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase =None
if self.use_token_type_ids:
__UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase =RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _a ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase =self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs
__UpperCAmelCase ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def _a ( self : List[str] ) -> Dict:
__UpperCAmelCase =self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs
__UpperCAmelCase =True
__UpperCAmelCase =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class _A ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : Union[str, Any] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _a ( self : List[Any] ) -> List[str]:
__UpperCAmelCase =FlaxRobertaModelTester(self )
@slow
def _a ( self : Optional[Any] ) -> List[Any]:
for model_class_name in self.all_model_classes:
__UpperCAmelCase =model_class_name.from_pretrained("""roberta-base""" , from_pt=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model(np.ones((1, 1) ) )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
| 68 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _A ( UpperCamelCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase =params
__UpperCAmelCase =np.array(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =np.array([len(__SCREAMING_SNAKE_CASE ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : int , __SCREAMING_SNAKE_CASE : int ) -> List[Any]:
return (self.token_ids[index], self.lengths[index])
def __len__( self : int ) -> Optional[Any]:
return len(self.lengths )
def _a ( self : Dict ) -> List[str]:
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def _a ( self : List[Any] ) -> List[Any]:
__UpperCAmelCase =self.params.max_model_input_size
__UpperCAmelCase =self.lengths > max_len
logger.info(f'''Splitting {sum(__SCREAMING_SNAKE_CASE )} too long sequences.''' )
def divide_chunks(__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple ):
return [l[i : i + n] for i in range(0 , len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )]
__UpperCAmelCase =[]
__UpperCAmelCase =[]
if self.params.mlm:
__UpperCAmelCase , __UpperCAmelCase =self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
__UpperCAmelCase , __UpperCAmelCase =self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__UpperCAmelCase =[]
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__UpperCAmelCase =np.insert(__SCREAMING_SNAKE_CASE , 0 , __SCREAMING_SNAKE_CASE )
if sub_s[-1] != sep_id:
__UpperCAmelCase =np.insert(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
assert len(__SCREAMING_SNAKE_CASE ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__SCREAMING_SNAKE_CASE )
new_tok_ids.extend(__SCREAMING_SNAKE_CASE )
new_lengths.extend([len(__SCREAMING_SNAKE_CASE ) for l in sub_seqs] )
__UpperCAmelCase =np.array(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =np.array(__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase =len(self )
__UpperCAmelCase =self.lengths > 11
__UpperCAmelCase =self.token_ids[indices]
__UpperCAmelCase =self.lengths[indices]
__UpperCAmelCase =len(self )
logger.info(f'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def _a ( self : int ) -> Any:
if "unk_token" not in self.params.special_tok_ids:
return
else:
__UpperCAmelCase =self.params.special_tok_ids["""unk_token"""]
__UpperCAmelCase =len(self )
__UpperCAmelCase =np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__UpperCAmelCase =(unk_occs / self.lengths) < 0.5
__UpperCAmelCase =self.token_ids[indices]
__UpperCAmelCase =self.lengths[indices]
__UpperCAmelCase =len(self )
logger.info(f'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def _a ( self : Any ) -> Tuple:
if not self.params.is_master:
return
logger.info(f'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def _a ( self : Any , __SCREAMING_SNAKE_CASE : Any ) -> Tuple:
__UpperCAmelCase =[t[0] for t in batch]
__UpperCAmelCase =[t[1] for t in batch]
assert len(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE )
# Max for paddings
__UpperCAmelCase =max(__SCREAMING_SNAKE_CASE )
# Pad token ids
if self.params.mlm:
__UpperCAmelCase =self.params.special_tok_ids["""pad_token"""]
else:
__UpperCAmelCase =self.params.special_tok_ids["""unk_token"""]
__UpperCAmelCase =[list(t.astype(__SCREAMING_SNAKE_CASE ) ) + [pad_idx] * (max_seq_len_ - len(__SCREAMING_SNAKE_CASE )) for t in token_ids]
assert len(tk_ ) == len(__SCREAMING_SNAKE_CASE )
assert all(len(__SCREAMING_SNAKE_CASE ) == max_seq_len_ for t in tk_ )
__UpperCAmelCase =torch.tensor(tk_ ) # (bs, max_seq_len_)
__UpperCAmelCase =torch.tensor(__SCREAMING_SNAKE_CASE ) # (bs)
return tk_t, lg_t
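# batch_sequences above pads every sequence up to the batch's longest length
# with a pad (or unk) id and stacks the result into (batch_size, max_seq_len)
# tensors. The collation step in isolation (pad id 0 is illustrative; the real
# code picks pad_token or unk_token from the params):
import torch

batch = [[5, 7, 9], [5, 7], [5]]
pad_idx = 0
max_len = max(len(s) for s in batch)
padded = torch.tensor([s + [pad_idx] * (max_len - len(s)) for s in batch])
lengths = torch.tensor([len(s) for s in batch])
assert padded.shape == (3, 3) and lengths.tolist() == [3, 2, 1]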
| 68 |
from __future__ import annotations
def lowercase__ ( A_: list[list[int]] ) -> int:
"""simple docstring"""
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(A_ ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(A_ ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
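# The function folds running minima into the matrix in place: the first row
# and column become prefix sums, then each cell adds the cheaper of the cell
# above or to the left, so the bottom-right entry holds the answer. Worked
# example on the classic grid, calling the function as defined above: the
# cheapest top-left -> bottom-right path is 1 -> 3 -> 1 -> 1 -> 1, cost 7.
assert lowercase__([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7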
if __name__ == "__main__":
import doctest
doctest.testmod()
| 68 | 1 |
import comet # From: unbabel-comet
import torch
import datasets
__A = datasets.logging.get_logger(__name__)
__A = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
__A = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
__A = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
"""simple docstring"""
def _a ( self : Optional[Any] ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""sources""": datasets.Value("""string""" , id="""sequence""" ),
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[
"""https://github.com/Unbabel/COMET""",
"""https://www.aclweb.org/anthology/2020.emnlp-main.213/""",
"""http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""",
] , )
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
if self.config_name == "default":
__UpperCAmelCase =comet.load_from_checkpoint(comet.download_model("""wmt20-comet-da""" ) )
else:
__UpperCAmelCase =comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : List[str]=False ) -> Union[str, Any]:
if gpus is None:
__UpperCAmelCase =1 if torch.cuda.is_available() else 0
__UpperCAmelCase ={"""src""": sources, """mt""": predictions, """ref""": references}
__UpperCAmelCase =[dict(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) for t in zip(*data.values() )]
__UpperCAmelCase , __UpperCAmelCase =self.scorer.predict(__SCREAMING_SNAKE_CASE , gpus=__SCREAMING_SNAKE_CASE , progress_bar=__SCREAMING_SNAKE_CASE )
return {"mean_score": mean_score, "scores": scores}
| 68 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def lowercase__ ( A_: int , A_: int , A_: int , A_: int , A_: int , A_: int ) -> np.ndarray:
"""simple docstring"""
if (ksize % 2) == 0:
__UpperCAmelCase =ksize + 1
__UpperCAmelCase =np.zeros((ksize, ksize) , dtype=np.float64 )
# each value
for y in range(A_ ):
for x in range(A_ ):
# distance from center
__UpperCAmelCase =x - ksize // 2
__UpperCAmelCase =y - ksize // 2
# degree to radiant
__UpperCAmelCase =theta / 180 * np.pi
__UpperCAmelCase =np.cos(_theta )
__UpperCAmelCase =np.sin(_theta )
# get kernel x
__UpperCAmelCase =cos_theta * px + sin_theta * py
# get kernel y
__UpperCAmelCase =-sin_theta * px + cos_theta * py
# fill kernel
__UpperCAmelCase =np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
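# Quick shape check for the kernel builder above, assuming the even-ksize
# bump assigns back to ksize (as in the standard implementation) and the
# definition resolves to the gabor_filter_kernel name used in __main__ below:
# an even ksize of 10 is bumped to 11, giving an 11x11 kernel.
k = gabor_filter_kernel(10, 8, 0, 10, 0, 0)
assert k.shape == (11, 11)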
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__A = imread("../image_data/lena.jpg")
# turn image in gray scale value
__A = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__A = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
__A = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filter2D(gray, CV_8UC3, kernel_aa)
__A = out / out.max() * 2_55
__A = out.astype(np.uint8)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
| 68 | 1 |
from __future__ import annotations
import math
from collections.abc import Callable
def lowercase__ ( A_: Callable[[int | float], int | float] , A_: int | float , A_: int | float , A_: int = 100 , ) -> float:
"""simple docstring"""
__UpperCAmelCase =x_start
__UpperCAmelCase =fnc(A_ )
__UpperCAmelCase =0.0
for _ in range(A_ ):
# Approximates curve as a sequence of linear lines and sums their length
__UpperCAmelCase =(x_end - x_start) / steps + xa
__UpperCAmelCase =fnc(A_ )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
__UpperCAmelCase =xa
__UpperCAmelCase =fxa
return length
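# The routine approximates arc length by chaining straight segments and
# summing math.hypot over each step. For a straight line the approximation is
# exact up to float error, which makes a handy check (assuming the definition
# resolves to the line_length name used in __main__ below): f(x) = x from
# 0 to 1 has arc length sqrt(2).
assert math.isclose(line_length(lambda x: x, 0, 1, 10), math.sqrt(2))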
if __name__ == "__main__":
def lowercase__ ( A_: Any ) -> Any:
"""simple docstring"""
return math.sin(10 * x )
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
__A = 10
while i <= 10_00_00:
print(F"""With {i} steps: {line_length(f, -10, 10, i)}""")
i *= 10
| 68 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _A :
"""simple docstring"""
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : List[str]=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=[1, 2, 1] , __SCREAMING_SNAKE_CASE : List[Any]=[2, 2, 4] , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Any=2.0 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : int=0.0 , __SCREAMING_SNAKE_CASE : Dict=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : Tuple=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Any=10 , __SCREAMING_SNAKE_CASE : Dict=8 , ) -> List[Any]:
__UpperCAmelCase =parent
__UpperCAmelCase =batch_size
__UpperCAmelCase =image_size
__UpperCAmelCase =patch_size
__UpperCAmelCase =num_channels
__UpperCAmelCase =embed_dim
__UpperCAmelCase =depths
__UpperCAmelCase =num_heads
__UpperCAmelCase =window_size
__UpperCAmelCase =mlp_ratio
__UpperCAmelCase =qkv_bias
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =drop_path_rate
__UpperCAmelCase =hidden_act
__UpperCAmelCase =use_absolute_embeddings
__UpperCAmelCase =patch_norm
__UpperCAmelCase =layer_norm_eps
__UpperCAmelCase =initializer_range
__UpperCAmelCase =is_training
__UpperCAmelCase =scope
__UpperCAmelCase =use_labels
__UpperCAmelCase =type_sequence_label_size
__UpperCAmelCase =encoder_stride
def _a ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase =None
if self.use_labels:
__UpperCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase =self.get_config()
return config, pixel_values, labels
def _a ( self : List[Any] ) -> Optional[Any]:
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _a ( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
__UpperCAmelCase =SwinvaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__UpperCAmelCase =int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _a ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Tuple:
__UpperCAmelCase =SwinvaForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__UpperCAmelCase =1
__UpperCAmelCase =SwinvaForMaskedImageModeling(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
__UpperCAmelCase =self.type_sequence_label_size
__UpperCAmelCase =SwinvaForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : List[str] ) -> Tuple:
__UpperCAmelCase =self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs
__UpperCAmelCase ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _A ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowerCamelCase : Tuple = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Dict = False
lowerCamelCase : Tuple = False
lowerCamelCase : List[str] = False
lowerCamelCase : Tuple = False
def _a ( self : str ) -> str:
__UpperCAmelCase =SwinvaModelTester(self )
__UpperCAmelCase =ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , embed_dim=37 )
def _a ( self : List[Any] ) -> Optional[int]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : str ) -> str:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def _a ( self : Tuple ) -> Tuple:
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def _a ( self : Optional[Any] ) -> int:
pass
def _a ( self : Tuple ) -> int:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def _a ( self : str ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase =[*signature.parameters.keys()]
__UpperCAmelCase =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =True
for model_class in self.all_model_classes:
__UpperCAmelCase =True
__UpperCAmelCase =False
__UpperCAmelCase =True
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
__UpperCAmelCase =len(self.model_tester.depths )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase =True
__UpperCAmelCase =config.window_size**2
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
__UpperCAmelCase =len(__SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__UpperCAmelCase =True
__UpperCAmelCase =True
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
if hasattr(self.model_tester , """num_hidden_states_types""" ):
__UpperCAmelCase =self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__UpperCAmelCase =2
self.assertEqual(out_len + added_hidden_states , len(__SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> int:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.hidden_states
__UpperCAmelCase =getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# Swinv2 has a different seq_length
__UpperCAmelCase =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__UpperCAmelCase =outputs.reshaped_hidden_states
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =reshaped_hidden_states[0].shape
__UpperCAmelCase =(
reshaped_hidden_states[0].view(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _a ( self : str ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _a ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =3
__UpperCAmelCase =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__UpperCAmelCase =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__UpperCAmelCase =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
def _a ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Dict:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def _a ( self : int ) -> Dict:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase =SwinvaModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def _a ( self : Dict ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =_config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(config=__SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class _A ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Tuple ) -> Dict:
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def _a ( self : int ) -> Optional[int]:
__UpperCAmelCase =SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.default_image_processor
__UpperCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__UpperCAmelCase =image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
__UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE )
# verify the logits
__UpperCAmelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 68 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 68 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A = logging.get_logger(__name__)
__A = {"vocab_file": "spiece.model"}
__A = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
__A = {
"AI-Sweden/gpt-sw3-126m": 20_48,
"AI-Sweden/gpt-sw3-350m": 20_48,
"AI-Sweden/gpt-sw3-1.6b": 20_48,
"AI-Sweden/gpt-sw3-6.7b": 20_48,
"AI-Sweden/gpt-sw3-20b": 20_48,
}
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : int = VOCAB_FILES_NAMES
lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = ['input_ids', 'attention_mask']
    def __init__( self : Dict , vocab_file : Union[str, Any] , do_lower_case : Optional[Any]=False , remove_space : Union[str, Any]=False , keep_accents : List[str]=False , pad_token : Dict=None , unk_token : List[Any]=None , eos_token : Dict=None , bos_token : Any=None , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : Optional[Any] , ) -> None:
        self.sp_model_kwargs ={} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path =kwargs.get("""name_or_path""" )
        if name_or_path is None:
            logger.warning(
                """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
                """ if you are testing the model, this can safely be ignored""" )
            name_or_path ="""None"""
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token ="""<|endoftext|>""" if eos_token is None else eos_token
        unk_token ="""<unk>""" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token =unk_token if pad_token is None else pad_token
            bos_token =eos_token if bos_token is None else bos_token
        else:
            pad_token ="""<pad>""" if pad_token is None else pad_token
            bos_token ="""<s>""" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case =do_lower_case
        self.remove_space =remove_space
        self.keep_accents =keep_accents
        self.vocab_file =vocab_file
        self.sp_model =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # Used for whitespace normalization in input texts
        # fmt: off
        # NOTE: the original literal unicode whitespace characters were flattened during text
        # extraction; reconstructed below as explicit escapes (an assumed set of unicode spaces).
        self.whitespaces ={"\u00a0", "\u1680", "\u2000", "\u2001", "\u2002", "\u2003", "\u2004", "\u2005", "\u2006", "\u2007", "\u200b", "\ufeff"}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re =re.compile(
            f'''[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]''' )
    def __getstate__( self : Any ) -> str:
        state =self.__dict__.copy()
        state["""sp_model"""] =None
        return state
    def __setstate__( self : str , d : Optional[Any] ) -> Union[str, Any]:
        self.__dict__ =d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs ={}
        self.sp_model =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size( self : Union[str, Any] ) -> int:
return len(self.sp_model )
    def preprocess_text( self : Dict , text : str ) -> str:
        text =self.non_printing_characters_re.sub("""""" , text )
        # Normalize whitespaces
        text ="""""".join([char if char not in self.whitespaces else """ """ for char in text] )
        # NFC Unicode normalization
        text =unicodedata.normalize("""NFC""" , text )
        return text
    def _a ( self : List[str] , text : str , **kwargs : Union[str, Any] ) -> List[str]:
        text =self.preprocess_text(text )
        return self.sp_model.encode(text , out_type=str )
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str ) -> int:
return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> str:
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
    @staticmethod
    def _a ( __SCREAMING_SNAKE_CASE : str ) -> str:
        # Returns the input string unchanged; overrides the default token clean-up.
        return __SCREAMING_SNAKE_CASE
    def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> str:
        current_sub_tokens =[]
        out_string =""""""
        prev_is_special =False
        for token in __SCREAMING_SNAKE_CASE :
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special =True
                current_sub_tokens =[]
            else:
                current_sub_tokens.append(token )
                prev_is_special =False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string
    def _a ( self : Any ) -> Dict[str, int]:
        vocab ={self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _a ( self : Any , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file =os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model =self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def _a ( self : List[Any] , text : Union[str, List[str]] , return_tensors : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(text , str ):
            text =self.preprocess_text(text )
            token_ids =self.sp_model.encode(text )
        else:
            text =[self.preprocess_text(t ) for t in text]
            token_ids =self.sp_model.encode(text )
        if return_tensors is True or return_tensors == "pt":
            token_ids =torch.tensor(token_ids )
        return token_ids
def _a ( self : str , __SCREAMING_SNAKE_CASE : Union[int, List[int]] ) -> str:
return self.sp_model.decode(__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : "Conversation" ) -> List[int]:
__UpperCAmelCase =[f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
__UpperCAmelCase =(
f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(__SCREAMING_SNAKE_CASE ) + f'''{self.bos_token}Bot:'''
)
return self.encode(text=__SCREAMING_SNAKE_CASE )
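if __name__ == "__main__":
    # Hedged demo (not part of the original file): illustrates the preprocessing steps the
    # tokenizer applies above without needing a spiece.model on disk.
    sample = "Hej\u00a0v\u0061\u0308rlden"  # non-breaking space + decomposed "ä"
    normalized = unicodedata.normalize("""NFC""" , sample.replace("\u00a0" , " " ) )
    print(normalized )  # -> "Hej världen"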
| 68 | 1 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple( A_: Dict ) -> str:
    """simple docstring"""
    if isinstance(A_ , collections.abc.Iterable ):
        return A_
    return (A_, A_)
@require_tf
class TFVisionTextDualEncoderMixin :
"""simple docstring"""
    def get_vision_text_model( self : Dict , vision_config : str , text_config : int ) -> List[str]:
        pass
    def prepare_config_and_inputs( self : Optional[int] ) -> Tuple:
        pass
    def get_pretrained_model_and_inputs( self : int ) -> str:
        pass
    def check_model_from_pretrained_configs( self : Tuple , text_config : Optional[Any] , input_ids : Tuple , attention_mask : List[Any] , vision_config : Optional[Any] , pixel_values : str=None , **kwargs : List[Any] ) -> List[Any]:
        config =VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        model =TFVisionTextDualEncoderModel(config )
        output =model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
    def check_vision_text_dual_encoder_model( self : Union[str, Any] , text_config : Any , input_ids : Any , attention_mask : Optional[int] , vision_config : Optional[Any] , pixel_values : str=None , **kwargs : List[Any] ) -> List[str]:
        vision_model , text_model =self.get_vision_text_model(vision_config , text_config )
        model =TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output =model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_vision_text_dual_encoder_from_pretrained( self : Any , text_config : Tuple , input_ids : Tuple , attention_mask : List[Any] , vision_config : str , pixel_values : List[str]=None , **kwargs : Optional[int] ) -> Union[str, Any]:
        vision_model , text_model =self.get_vision_text_model(vision_config , text_config )
        kwargs ={"""vision_model""": vision_model, """text_model""": text_model}
        model =TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output =model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_save_load( self : List[str] , text_config : Dict , input_ids : Any , attention_mask : List[str] , vision_config : int , pixel_values : Optional[int]=None , **kwargs : int ) -> Tuple:
        vision_model , text_model =self.get_vision_text_model(vision_config , text_config )
        model =TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output =model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        out_1 =output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model =TFVisionTextDualEncoderModel.from_pretrained(tmpdirname )
            after_output =model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
            out_2 =after_output[0].numpy()
            max_diff =np.amax(np.abs(out_2 - out_1 ) )
            self.assertLessEqual(max_diff , 1e-5 )
    def check_vision_text_output_attention( self : Tuple , text_config : List[str] , input_ids : Dict , attention_mask : Tuple , vision_config : Tuple , pixel_values : Union[str, Any]=None , **kwargs : Optional[Any] ) -> Optional[int]:
        vision_model , text_model =self.get_vision_text_model(vision_config , text_config )
        model =TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output =model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True )
        vision_attentions =output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size =to_atuple(vision_model.config.image_size )
        patch_size =to_atuple(vision_model.config.patch_size )
        num_patches =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len =num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions =output.text_model_output.attentions
        self.assertEqual(len(text_attentions ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def assert_almost_equals( self : Tuple , a : np.ndarray , b : np.ndarray , tol : float ) -> Tuple:
        diff =np.abs((a - b) ).max()
        self.assertLessEqual(diff , tol , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
    def _a ( self : List[Any] ) -> Optional[int]:
        inputs_dict =self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict )
    def _a ( self : Union[str, Any] ) -> int:
        inputs_dict =self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict )
    def _a ( self : List[Any] ) -> Any:
        inputs_dict =self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict )
    def _a ( self : List[Any] ) -> Dict:
        inputs_dict =self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict )
    def _a ( self : Any ) -> Dict:
        inputs_dict =self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict )
@slow
def _a ( self : Any ) -> Optional[Any]:
        model_2 , inputs =self.get_pretrained_model_and_inputs()
        outputs =model_2(**inputs )
        out_2 =outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname )
            model_1 =TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname )
            after_outputs =model_1(**inputs )
            out_1 =after_outputs[0].numpy()
            max_diff =np.amax(np.abs(out_1 - out_2 ) )
            self.assertLessEqual(max_diff , 1e-5 )
@require_tf
class TFViTBertModelTest ( TFVisionTextDualEncoderMixin , unittest.TestCase ):
"""simple docstring"""
    def get_pretrained_model_and_inputs( self : str ) -> List[Any]:
        model =TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
        batch_size =13
        pixel_values =floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask =random_attention_mask([batch_size, 4] )
        inputs ={"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def get_vision_text_model( self : int , vision_config : Union[str, Any] , text_config : Any ) -> Optional[Any]:
        vision_model =TFViTModel(vision_config , name="""vision_model""" )
        text_model =TFBertModel(text_config , name="""text_model""" )
        return vision_model, text_model
    def prepare_config_and_inputs( self : Union[str, Any] ) -> Tuple:
        vit_model_tester =TFViTModelTester(self )
        bert_model_tester =TFBertModelTester(self )
        vision_config_and_inputs =vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs =bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values , _ =vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) =text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest ( TFVisionTextDualEncoderMixin , unittest.TestCase ):
"""simple docstring"""
    def get_pretrained_model_and_inputs( self : Optional[Any] ) -> int:
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model =TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
        batch_size =13
        pixel_values =floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask =random_attention_mask([batch_size, 4] )
        inputs ={"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def check_vision_text_output_attention( self : List[str] , text_config : List[str] , input_ids : Union[str, Any] , attention_mask : Any , vision_config : List[str] , pixel_values : Dict=None , **kwargs : Any ) -> Dict:
        vision_model , text_model =self.get_vision_text_model(vision_config , text_config )
        model =TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output =model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True )
        vision_attentions =output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions ) , vision_config.num_hidden_layers )
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size =to_atuple(vision_model.config.image_size )
        patch_size =to_atuple(vision_model.config.patch_size )
        num_patches =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len =num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions =output.text_model_output.attentions
        self.assertEqual(len(text_attentions ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def get_vision_text_model( self : Union[str, Any] , vision_config : List[Any] , text_config : Optional[int] ) -> Any:
        vision_model =TFDeiTModel(vision_config , name="""vision_model""" )
        text_model =TFRobertaModel(text_config , name="""text_model""" )
        return vision_model, text_model
    def prepare_config_and_inputs( self : Any ) -> Union[str, Any]:
        vit_model_tester =TFDeiTModelTester(self )
        bert_model_tester =TFRobertaModelTester(self )
        vision_config_and_inputs =vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs =bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values , _ =vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) =text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class _A ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
    def get_pretrained_model_and_inputs( self : Any ) -> Optional[Any]:
        model =TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
        batch_size =13
        pixel_values =floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask =random_attention_mask([batch_size, 4] )
        inputs ={"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def get_vision_text_model( self : Optional[Any] , vision_config : List[Any] , text_config : int ) -> Tuple:
        vision_model =TFCLIPVisionModel(vision_config , name="""vision_model""" )
        text_model =TFBertModel(text_config , name="""text_model""" )
        return vision_model, text_model
    def prepare_config_and_inputs( self : int ) -> Tuple:
        clip_model_tester =TFCLIPVisionModelTester(self )
        bert_model_tester =TFBertModelTester(self )
        vision_config_and_inputs =clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs =bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values =vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) =text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class _A ( unittest.TestCase ):
"""simple docstring"""
@slow
    def _a ( self : Optional[Any] ) -> Optional[int]:
        model =TFVisionTextDualEncoderModel.from_pretrained(
            """clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=True )
        processor =VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
        image =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        inputs =processor(
            text=["""una foto di un gatto""", """una foto di un cane"""] , images=image , padding=True , return_tensors="""np""" )
        outputs =model(**inputs )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits =np.array([[1.2_284_727, 0.3_104_122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , expected_logits , atol=1e-3 ) )
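        # Hedged follow-up (not in the original test): the similarity logits can be turned into
        # per-image probabilities with a softmax over the text axis, e.g.
        # probs = tf.nn.softmax(outputs.logits_per_image, axis=-1)  # needs `import tensorflow as tf`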
| 68 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__A = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class _A ( unittest.TestCase ):
"""simple docstring"""
    def analyze_directory( self : Union[str, Any] , directory : Path , identifier : Union[str, None] = None , ignore_files : Union[List[str], None] = None , n_identifier : Union[str, List[str], None] = None , only_modules : bool = True , ) -> List[str]:
        files =[file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]
        if identifier is not None:
            files =[file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier , list ):
                for n_ in n_identifier:
                    files =[file for file in files if n_ not in file]
            else:
                files =[file for file in files if n_identifier not in file]
        ignore_files =ignore_files or []
        ignore_files.append("""__init__.py""" )
        files =[file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("""Testing""" , file )
            if only_modules:
                module_identifier =file.split(""".""" )[0]
                try:
                    module =getattr(transformers , module_identifier )
                    suite =doctest.DocTestSuite(module )
                    result =unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(f'''{module_identifier} is not a module.''' )
            else:
                result =doctest.testfile(str(Path("""..""" ) / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def _a ( self : Optional[Any] ) -> List[str]:
        directory =Path("""src/transformers""" )
        identifier ="""modeling"""
        ignore_files =[
            """modeling_ctrl.py""",
            """modeling_tf_ctrl.py""",
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )
    def _a ( self : Tuple ) -> Optional[int]:
        directory =Path("""src/transformers""" )
        identifier ="""tokenization"""
        self.analyze_directory(directory , identifier=identifier )
    def _a ( self : Optional[Any] ) -> Optional[Any]:
        directory =Path("""src/transformers""" )
        identifier ="""configuration"""
        self.analyze_directory(directory , identifier=identifier )
    def _a ( self : List[Any] ) -> Tuple:
        directory =Path("""src/transformers""" )
        n_identifiers =["""configuration""", """modeling""", """tokenization"""]
        self.analyze_directory(directory , n_identifier=n_identifiers )
    def _a ( self : Any ) -> Tuple:
        directory =Path("""docs/source""" )
        ignore_files =["""favicon.ico"""]
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False )
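    # Hedged sketch of the module-level path exercised by `analyze_directory` above
    # (the module name is illustrative):
    # import transformers.models.bert.modeling_bert as module
    # suite = doctest.DocTestSuite(module)
    # unittest.TextTestRunner().run(suite)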
| 68 | 1 |
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def convert_classification( base_model_name: str , hf_config: Any , downstream_dict: dict ) -> Any:
    """simple docstring"""
    model =WavaVecaForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data =downstream_dict["""projector.weight"""]
    model.projector.bias.data =downstream_dict["""projector.bias"""]
    model.classifier.weight.data =downstream_dict["""model.post_net.linear.weight"""]
    model.classifier.bias.data =downstream_dict["""model.post_net.linear.bias"""]
    return model
def convert_diarization( base_model_name: str , hf_config: Any , downstream_dict: dict ) -> Any:
    """simple docstring"""
    model =WavaVecaForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data =downstream_dict["""model.linear.weight"""]
    model.classifier.bias.data =downstream_dict["""model.linear.bias"""]
    return model
def convert_xvector( base_model_name: str , hf_config: Any , downstream_dict: dict ) -> Any:
    """simple docstring"""
    model =WavaVecaForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data =downstream_dict["""connector.weight"""]
    model.projector.bias.data =downstream_dict["""connector.bias"""]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data =downstream_dict[
            f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
        ]
        model.tdnn[i].kernel.bias.data =downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
    model.feature_extractor.weight.data =downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
    model.feature_extractor.bias.data =downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
    model.classifier.weight.data =downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
    model.classifier.bias.data =downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
    model.objective.weight.data =downstream_dict["""objective.W"""]
    return model
@torch.no_grad()
def convert_saprl_checkpoint( base_model_name: str , config_path: str , checkpoint_path: str , model_dump_path: str ) -> None:
    """simple docstring"""
    checkpoint =torch.load(checkpoint_path , map_location="""cpu""" )
    downstream_dict =checkpoint["""Downstream"""]
    hf_config =WavaVecaConfig.from_pretrained(config_path )
    hf_feature_extractor =WavaVecaFeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )
    arch =hf_config.architectures[0]
    if arch.endswith("""ForSequenceClassification""" ):
        hf_model =convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("""ForAudioFrameClassification""" ):
        hf_model =convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("""ForXVector""" ):
        hf_model =convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data =checkpoint["""Featurizer"""]["""weights"""]
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
__A = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
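# Hedged example invocation (the script name and all paths are illustrative):
# python convert_s3prl_checkpoint.py \
#     --base_model_name facebook/wav2vec2-base \
#     --config_path ./config.json \
#     --checkpoint_path ./s3prl_checkpoint.ckpt \
#     --model_dump_path ./converted_model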
| 68 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__A = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor( shape: list , vocab_size: int , rng: random.Random = None ) -> np.ndarray:
    """simple docstring"""
    if rng is None:
        rng =random.Random()
    total_dims =1
    for dim in shape:
        total_dims *= dim
    values =[]
    for _ in range(total_dims ):
        values.append(rng.randint(0 , vocab_size - 1 ) )
    output =np.array(values , dtype=jnp.intaa ).reshape(shape )
    return output
def random_attention_mask( shape: list , rng: random.Random = None ) -> np.ndarray:
    """simple docstring"""
    attn_mask =ids_tensor(shape , vocab_size=2 , rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] =1
    return attn_mask
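# Hedged demo (not in the original module) of the two helpers above:
# ids = ids_tensor((2, 5), vocab_size=99)   # (2, 5) int array with values in [0, 98]
# mask = random_attention_mask((2, 5))      # 0/1 mask whose last column is forced to 1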
@require_flax
class FlaxGenerationTesterMixin :
"""simple docstring"""
    model_tester : Optional[Any] = None
    all_generative_model_classes : int = ()
    def _get_input_ids_and_config( self : str ) -> Tuple:
        config , inputs =self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        max_batch_size =2
        sequence_length =inputs["""input_ids"""].shape[-1] // 2
        input_ids =inputs["""input_ids"""][:max_batch_size, :sequence_length]
        attention_mask =jnp.ones_like(input_ids )
        attention_mask =attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        max_length =input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id =config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
    def _a ( self : Union[str, Any] ) -> Optional[int]:
        config , input_ids , attention_mask , max_length =self._get_input_ids_and_config()
        config.do_sample =False
        config.max_length =max_length
        config.decoder_start_token_id =0
        for model_class in self.all_generative_model_classes:
            flax_model =model_class(config )
            pt_model_class_name =model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class =getattr(transformers , pt_model_class_name )
            pt_model =pt_model_class(config ).eval()
            pt_model =load_flax_weights_in_pytorch_model(pt_model , flax_model.params )
            flax_generation_outputs =flax_model.generate(input_ids ).sequences
            pt_generation_outputs =pt_model.generate(torch.tensor(input_ids , dtype=torch.long ) )
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs =flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
    def _a ( self : Optional[int] ) -> Dict:
        config , input_ids , attention_mask , max_length =self._get_input_ids_and_config()
        config.do_sample =False
        config.max_length =max_length
        for model_class in self.all_generative_model_classes:
            model =model_class(config )
            generation_outputs =model.generate(input_ids ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate =jit(model.generate )
            jit_generation_outputs =jit_generate(input_ids ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def _a ( self : Union[str, Any] ) -> List[str]:
        config , input_ids , attention_mask , max_length =self._get_input_ids_and_config()
        config.do_sample =True
        config.max_length =max_length
        for model_class in self.all_generative_model_classes:
            model =model_class(config )
            generation_outputs =model.generate(input_ids ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate =jit(model.generate )
            jit_generation_outputs =jit_generate(input_ids ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def _a ( self : List[Any] ) -> Any:
        config , input_ids , attention_mask , max_length =self._get_input_ids_and_config()
        config.do_sample =False
        config.max_length =max_length
        config.num_beams =2
        for model_class in self.all_generative_model_classes:
            model =model_class(config )
            generation_outputs =model.generate(input_ids ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate =jit(model.generate )
            jit_generation_outputs =jit_generate(input_ids ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def _a ( self : Any ) -> Tuple:
        config , input_ids , attention_mask , max_length =self._get_input_ids_and_config()
        config.do_sample =False
        config.max_length =max_length
        config.num_beams =2
        config.num_return_sequences =2
        for model_class in self.all_generative_model_classes:
            model =model_class(config )
            generation_outputs =model.generate(input_ids ).sequences
            self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
    def _a ( self : Union[str, Any] ) -> List[Any]:
        config , input_ids , attention_mask , max_length =self._get_input_ids_and_config()
        config.do_sample =True
        config.max_length =max_length
        config.temperature =0.8
        config.top_k =10
        config.top_p =0.3
        config.min_length =1
        config.forced_bos_token_id =8
        config.forced_eos_token_id =9
        for model_class in self.all_generative_model_classes:
            model =model_class(config )
            generation_outputs =model.generate(input_ids ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate =jit(model.generate )
            jit_generation_outputs =jit_generate(input_ids ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def _a ( self : Union[str, Any] ) -> Optional[Any]:
        config , input_ids , attention_mask , max_length =self._get_input_ids_and_config()
        config.max_length =max_length
        config.min_length =1
        config.forced_bos_token_id =8
        config.forced_eos_token_id =9
        for model_class in self.all_generative_model_classes:
            model =model_class(config )
            generation_outputs =model.generate(input_ids ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate =jit(model.generate )
            jit_generation_outputs =jit_generate(input_ids ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def _a ( self : Optional[int] ) -> Any:
        config , input_ids , attention_mask , max_length =self._get_input_ids_and_config()
        config.max_length =max_length
        config.num_beams =2
        config.min_length =1
        config.forced_bos_token_id =8
        config.forced_eos_token_id =9
        for model_class in self.all_generative_model_classes:
            model =model_class(config )
            generation_outputs =model.generate(input_ids ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate =jit(model.generate )
            jit_generation_outputs =jit_generate(input_ids ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def _a ( self : List[str] ) -> Dict:
        config , input_ids , attention_mask , max_length =self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask =attention_mask.at[(0, 0)].set(0 )
        config.do_sample =False
        config.max_length =max_length
        for model_class in self.all_generative_model_classes:
            model =model_class(config )
            generation_outputs =model.generate(input_ids , attention_mask=attention_mask ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate =jit(model.generate )
            jit_generation_outputs =jit_generate(input_ids , attention_mask=attention_mask ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def _a ( self : Dict ) -> Tuple:
        config , input_ids , attention_mask , max_length =self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask =attention_mask.at[(0, 0)].set(0 )
        config.do_sample =True
        config.max_length =max_length
        for model_class in self.all_generative_model_classes:
            model =model_class(config )
            generation_outputs =model.generate(input_ids , attention_mask=attention_mask ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate =jit(model.generate )
            jit_generation_outputs =jit_generate(input_ids , attention_mask=attention_mask ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def _a ( self : Dict ) -> Tuple:
        config , input_ids , attention_mask , max_length =self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask =attention_mask.at[(0, 0)].set(0 )
        config.num_beams =2
        config.max_length =max_length
        for model_class in self.all_generative_model_classes:
            model =model_class(config )
            generation_outputs =model.generate(input_ids , attention_mask=attention_mask ).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length )
            jit_generate =jit(model.generate )
            jit_generation_outputs =jit_generate(input_ids , attention_mask=attention_mask ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class _A ( unittest.TestCase ):
"""simple docstring"""
    def _a ( self : int ) -> Any:
        tokenizer =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
        model =FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
        text ="""Hello world"""
        input_ids =tokenizer(text , return_tensors="""np""" ).input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError , """do_samples""" ):
            model.generate(input_ids , do_samples=True )
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError , """foo""" ):
            fake_model_kwargs ={"""foo""": """bar"""}
            model.generate(input_ids , **fake_model_kwargs )
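    # Hedged sketch of the jit pattern asserted throughout this file (model name illustrative):
    # model = FlaxAutoModelForCausalLM.from_pretrained("gpt2")
    # jit_generate = jit(model.generate)
    # sequences = jit_generate(input_ids).sequences  # must match eager model.generate(input_ids)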
| 68 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class _A ( PretrainedConfig ):
"""simple docstring"""
    model_type : Optional[Any] = 'realm'
    def __init__( self : str , vocab_size : Union[str, Any]=30522 , hidden_size : List[str]=768 , retriever_proj_size : Any=128 , num_hidden_layers : Any=12 , num_attention_heads : str=12 , num_candidates : Optional[int]=8 , intermediate_size : int=3072 , hidden_act : Optional[Any]="gelu_new" , hidden_dropout_prob : str=0.1 , attention_probs_dropout_prob : Union[str, Any]=0.1 , max_position_embeddings : Dict=512 , type_vocab_size : Tuple=2 , initializer_range : List[str]=0.02 , layer_norm_eps : Any=1e-12 , span_hidden_size : List[str]=256 , max_span_width : Any=10 , reader_layer_norm_eps : Any=1e-3 , reader_beam_size : Dict=5 , reader_seq_len : Any=320 , num_block_records : List[Any]=13353718 , searcher_beam_size : List[str]=5000 , pad_token_id : List[str]=1 , bos_token_id : Optional[Any]=0 , eos_token_id : Union[str, Any]=2 , **kwargs : int , ) -> Optional[Any]:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Common config
        self.vocab_size =vocab_size
        self.max_position_embeddings =max_position_embeddings
        self.hidden_size =hidden_size
        self.retriever_proj_size =retriever_proj_size
        self.num_hidden_layers =num_hidden_layers
        self.num_attention_heads =num_attention_heads
        self.num_candidates =num_candidates
        self.intermediate_size =intermediate_size
        self.hidden_act =hidden_act
        self.hidden_dropout_prob =hidden_dropout_prob
        self.attention_probs_dropout_prob =attention_probs_dropout_prob
        self.initializer_range =initializer_range
        self.type_vocab_size =type_vocab_size
        self.layer_norm_eps =layer_norm_eps
        # Reader config
        self.span_hidden_size =span_hidden_size
        self.max_span_width =max_span_width
        self.reader_layer_norm_eps =reader_layer_norm_eps
        self.reader_beam_size =reader_beam_size
        self.reader_seq_len =reader_seq_len
        # Retrieval config
        self.num_block_records =num_block_records
        self.searcher_beam_size =searcher_beam_size
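if __name__ == "__main__":
    # Hedged demo (not in the original file): instantiate the default config and inspect it.
    config = _A()
    assert config.to_dict()["vocab_size"] == 30522
    print(config.model_type)  # -> "realm"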
| 68 |
from __future__ import annotations
from collections.abc import Iterator
class Node :
    """simple docstring"""
    def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> None:
        self.value =__SCREAMING_SNAKE_CASE
        self.left: Node | None =None
        self.right: Node | None =None
class BinaryTreeNodeSum :
    """simple docstring"""
    def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Node ) -> None:
        self.tree =__SCREAMING_SNAKE_CASE
    def depth_first_search( self : Tuple , node : Node | None ) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )
    def __iter__( self : int ) -> Iterator[int]:
        yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
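    # Hedged demo (not in the original file): 10 + 5 + (-3) summed depth-first.
    root = Node(10)
    root.left = Node(5)
    root.right = Node(-3)
    print(next(iter(BinaryTreeNodeSum(root))))  # -> 12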
| 68 | 1 |
def or_gate(input_a: int , input_b: int ) -> int:
    """simple docstring"""
    return int((input_a, input_b).count(1 ) != 0 )
def test_or_gate() -> None:
    """simple docstring"""
    assert or_gate(0 , 0 ) == 0
    assert or_gate(0 , 1 ) == 1
    assert or_gate(1 , 0 ) == 1
    assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 68 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _create_iam_role_for_sagemaker( role_name: str ) -> None:
    """simple docstring"""
    iam_client =botoa.client("""iam""" )
    sagemaker_trust_policy ={
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
            RoleName=role_name , AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy , indent=2 ) )
        policy_document ={
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
            RoleName=role_name , PolicyName=f'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(policy_document , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def _get_iam_role_arn( role_name: str ) -> str:
    """simple docstring"""
    iam_client =botoa.client("""iam""" )
    return iam_client.get_role(RoleName=role_name )["Role"]["Arn"]
def get_sagemaker_input( ) -> SageMakerConfig:
    """simple docstring"""
    credentials_configuration =_ask_options(
        """How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , int , )
    aws_profile =None
    if credentials_configuration == 0:
        aws_profile =_ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
        os.environ["""AWS_PROFILE"""] =aws_profile
    else:
        print(
            """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
            """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
        aws_access_key_id =_ask_field("""AWS Access Key ID: """ )
        os.environ["""AWS_ACCESS_KEY_ID"""] =aws_access_key_id
        aws_secret_access_key =_ask_field("""AWS Secret Access Key: """ )
        os.environ["""AWS_SECRET_ACCESS_KEY"""] =aws_secret_access_key
    aws_region =_ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
    os.environ["""AWS_DEFAULT_REGION"""] =aws_region
    role_management =_ask_options(
        """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , int , )
    if role_management == 0:
        iam_role_name =_ask_field("""Enter your IAM role name: """ )
    else:
        iam_role_name ="""accelerate_sagemaker_execution_role"""
        print(f'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
        _create_iam_role_for_sagemaker(iam_role_name )
    is_custom_docker_image =_ask_field(
        """Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
    docker_image =None
    if is_custom_docker_image:
        docker_image =_ask_field("""Enter your Docker image: """ , lambda x : str(x ).lower() )
    is_sagemaker_inputs_enabled =_ask_field(
        """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
    sagemaker_inputs_file =None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file =_ask_field(
            """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda x : str(x ).lower() , )
    is_sagemaker_metrics_enabled =_ask_field(
        """Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
    sagemaker_metrics_file =None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file =_ask_field(
            """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda x : str(x ).lower() , )
    distributed_type =_ask_options(
        """What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
    dynamo_config ={}
    use_dynamo =_ask_field(
        """Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
    if use_dynamo:
        prefix ="""dynamo_"""
        dynamo_config[prefix + """backend"""] =_ask_options(
            """Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
        use_custom_options =_ask_field(
            """Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
        if use_custom_options:
            dynamo_config[prefix + """mode"""] =_ask_options(
                """Which mode do you want to use?""" , TORCH_DYNAMO_MODES , lambda x : TORCH_DYNAMO_MODES[int(x )] , default="""default""" , )
            dynamo_config[prefix + """use_fullgraph"""] =_ask_field(
                """Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
            dynamo_config[prefix + """use_dynamic"""] =_ask_field(
                """Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
    eca_instance_query ="""Which EC2 instance type you want to use for your training?"""
    if distributed_type != SageMakerDistributedType.NO:
        eca_instance_type =_ask_options(
            eca_instance_query , SAGEMAKER_PARALLEL_EC2_INSTANCES , lambda x : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x )] )
    else:
        eca_instance_query += "? [ml.p3.2xlarge]:"
        eca_instance_type =_ask_field(eca_instance_query , lambda x : str(x ).lower() , default="""ml.p3.2xlarge""" )
    num_machines =1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines =_ask_field(
            """How many machines do you want use? [1]: """ , int , default=1 , )
    mixed_precision =_ask_options(
        """Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
    if use_dynamo and mixed_precision == "no":
        print(
            """Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
    return SageMakerConfig(
        image_uri=docker_image , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=distributed_type , use_cpu=False , dynamo_config=dynamo_config , eca_instance_type=eca_instance_type , profile=aws_profile , region=aws_region , iam_role_name=iam_role_name , mixed_precision=mixed_precision , num_machines=num_machines , sagemaker_inputs_file=sagemaker_inputs_file , sagemaker_metrics_file=sagemaker_metrics_file , )
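# Hedged sketch (not in the original file): the returned SageMakerConfig is what
# `accelerate config` serializes to disk; a minimal programmatic equivalent might be:
# config = SageMakerConfig(
#     compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
#     distributed_type=SageMakerDistributedType.DATA_PARALLEL,
#     eca_instance_type="ml.p3.16xlarge", num_machines=2, region="us-east-1",
#     iam_role_name="accelerate_sagemaker_execution_role", mixed_precision="fp16",
# )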
| 68 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__A = logging.get_logger(__name__)
__A = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__A = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
__A = {
"roberta-base": 5_12,
"roberta-large": 5_12,
"roberta-large-mnli": 5_12,
"distilroberta-base": 5_12,
"roberta-base-openai-detector": 5_12,
"roberta-large-openai-detector": 5_12,
}
class _A ( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names : Optional[Any] = VOCAB_FILES_NAMES
    pretrained_vocab_files_map : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names : Any = ['input_ids', 'attention_mask']
    slow_tokenizer_class : Optional[int] = RobertaTokenizer
def __init__( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Tuple="replace" , __SCREAMING_SNAKE_CASE : str="<s>" , __SCREAMING_SNAKE_CASE : List[Any]="</s>" , __SCREAMING_SNAKE_CASE : str="</s>" , __SCREAMING_SNAKE_CASE : Any="<s>" , __SCREAMING_SNAKE_CASE : int="<unk>" , __SCREAMING_SNAKE_CASE : Optional[Any]="<pad>" , __SCREAMING_SNAKE_CASE : int="<mask>" , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : List[Any]=True , **__SCREAMING_SNAKE_CASE : List[Any] , ) -> int:
super().__init__(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , errors=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__UpperCAmelCase =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
__UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop("""type""" ) )
__UpperCAmelCase =add_prefix_space
__UpperCAmelCase =pre_tok_class(**__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =add_prefix_space
__UpperCAmelCase ="""post_processor"""
__UpperCAmelCase =getattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if tokenizer_component_instance:
__UpperCAmelCase =json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
__UpperCAmelCase =tuple(state["""sep"""] )
if "cls" in state:
__UpperCAmelCase =tuple(state["""cls"""] )
__UpperCAmelCase =False
if state.get("""add_prefix_space""" , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
__UpperCAmelCase =add_prefix_space
__UpperCAmelCase =True
if state.get("""trim_offsets""" , __SCREAMING_SNAKE_CASE ) != trim_offsets:
__UpperCAmelCase =trim_offsets
__UpperCAmelCase =True
if changes_to_apply:
__UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , state.pop("""type""" ) )
__UpperCAmelCase =component_class(**__SCREAMING_SNAKE_CASE )
setattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@property
def mask_token( self : List[str] ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def mask_token( self : str , __SCREAMING_SNAKE_CASE : List[Any] ) -> List[Any]:
__UpperCAmelCase =AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else value
__UpperCAmelCase =value
def _a ( self : Any , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : str ) -> BatchEncoding:
__UpperCAmelCase =kwargs.get("""is_split_into_words""" , __SCREAMING_SNAKE_CASE )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _a ( self : List[str] , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> BatchEncoding:
__UpperCAmelCase =kwargs.get("""is_split_into_words""" , __SCREAMING_SNAKE_CASE )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
__UpperCAmelCase =self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int]=None ) -> List[str]:
__UpperCAmelCase =[self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _a ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase =[self.sep_token_id]
__UpperCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
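# --- Added usage sketch (not part of the original file) -----------------------
# The class above is an obfuscated RobertaTokenizerFast. Its sentence-pair
# layout is `<s> A </s></s> B </s>`; the self-contained mirror below reproduces
# that layout with assumed special-token ids (0 = <s>, 2 = </s>, matching
# roberta-base conventions).
def _roberta_pair_layout(ids_a: list, ids_b: list = None) -> list:
    bos, eos = 0, 2  # assumed special-token ids
    out = [bos] + ids_a + [eos]
    if ids_b is None:
        return out
    return out + [eos] + ids_b + [eos]

assert _roberta_pair_layout([7, 8], [9]) == [0, 7, 8, 2, 2, 9, 2]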
| 68 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Tuple = 'ctrl'
lowerCamelCase : Any = ['past_key_values']
lowerCamelCase : Optional[int] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=246534 , __SCREAMING_SNAKE_CASE : int=256 , __SCREAMING_SNAKE_CASE : Optional[Any]=1280 , __SCREAMING_SNAKE_CASE : Optional[Any]=8192 , __SCREAMING_SNAKE_CASE : int=48 , __SCREAMING_SNAKE_CASE : Union[str, Any]=16 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=1e-6 , __SCREAMING_SNAKE_CASE : List[str]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , **__SCREAMING_SNAKE_CASE : int , ) -> Any:
__UpperCAmelCase =vocab_size
__UpperCAmelCase =n_positions
__UpperCAmelCase =n_embd
__UpperCAmelCase =n_layer
__UpperCAmelCase =n_head
__UpperCAmelCase =dff
__UpperCAmelCase =resid_pdrop
__UpperCAmelCase =embd_pdrop
__UpperCAmelCase =layer_norm_epsilon
__UpperCAmelCase =initializer_range
__UpperCAmelCase =use_cache
super().__init__(**__SCREAMING_SNAKE_CASE )
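# --- Added usage sketch (not part of the original file) -----------------------
# The class above mirrors transformers' CTRLConfig. Assuming transformers is
# installed, the attribute_map routes hidden_size reads to n_embd:
try:
    from transformers import CTRLConfig
    _cfg = CTRLConfig(n_layer=2, n_head=4)  # small toy config
    assert _cfg.hidden_size == _cfg.n_embd  # attribute_map in action
except ImportError:
    pass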
| 68 | 1 |
from __future__ import annotations
def lowercase__ ( A_: float , A_: float , A_: float , ) -> tuple:
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
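# --- Added usage sketch (not part of the original file) -----------------------
# The routine above applies the mass-action law n * p = n_i**2: pass exactly
# one of the three concentrations as 0 and it solves for that one. A
# self-contained mirror (the original's obfuscated name `lowercase__` is not
# callable as-is):
def _carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("exactly one of the three values must be 0")
    if electron_conc == 0:
        return ("electron_conc", intrinsic_conc**2 / hole_conc)
    if hole_conc == 0:
        return ("hole_conc", intrinsic_conc**2 / electron_conc)
    return ("intrinsic_conc", (electron_conc * hole_conc) ** 0.5)

assert _carrier_concentration(25, 0, 100) == ("hole_conc", 400.0)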
| 68 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def lowercase__ ( A_: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__UpperCAmelCase =k.replace(parlai_name , hf_name )
if k.startswith("""encoder""" ):
__UpperCAmelCase =k.replace(""".attn""" , """.self_attn""" )
__UpperCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" )
__UpperCAmelCase =k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
__UpperCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" )
__UpperCAmelCase =k.replace("""norm2""" , """encoder_attn_layer_norm""" )
__UpperCAmelCase =k.replace("""norm3""" , """final_layer_norm""" )
return k
def lowercase__ ( A_: Tuple ) -> str:
"""simple docstring"""
__UpperCAmelCase =[
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
__UpperCAmelCase =sd.pop(A_ )
__UpperCAmelCase =k.replace("""layernorm_embedding""" , """layer_norm""" )
assert new_k not in sd
__UpperCAmelCase =v
__A = ["START"]
@torch.no_grad()
def lowercase__ ( A_: List[Any] , A_: str , A_: int ) -> Optional[int]:
"""simple docstring"""
__UpperCAmelCase =torch.load(A_ , map_location="""cpu""" )
__UpperCAmelCase =model["""model"""]
__UpperCAmelCase =BlenderbotConfig.from_json_file(A_ )
__UpperCAmelCase =BlenderbotForConditionalGeneration(A_ )
__UpperCAmelCase =m.model.state_dict().keys()
__UpperCAmelCase =[]
__UpperCAmelCase ={}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__UpperCAmelCase =rename_state_dict_key(A_ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__UpperCAmelCase =v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(A_ )
m.model.load_state_dict(A_ , strict=A_ )
m.half()
m.save_pretrained(A_ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
__A = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
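# --- Added usage note (not part of the original file) -------------------------
# Example invocation; the script name and paths are placeholders:
#   python convert_blenderbot_checkpoint.py --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot --hf_config_json blenderbot-3b-config.json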
| 68 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
__A = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
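# --- Added usage note (not part of the original file) -------------------------
# The _LazyModule pattern above defers the heavy torch import until an
# attribute is first touched; downstream code can still write, for example:
#   from transformers.models.clap import ClapProcessor, ClapModel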
| 68 |
from itertools import permutations
def lowercase__ ( A_: tuple ) -> bool:
"""simple docstring"""
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__UpperCAmelCase =[7, 11, 13, 17]
for i, test in enumerate(A_ ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowercase__ ( A_: int = 10 ) -> int:
"""simple docstring"""
return sum(
int("""""".join(map(A_ , A_ ) ) )
for num in permutations(range(A_ ) )
if is_substring_divisible(A_ ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 68 | 1 |
from typing import List
from .keymap import KEYMAP, get_character
def lowercase__ ( A_: str ) -> str:
"""simple docstring"""
def decorator(A_: int ):
__UpperCAmelCase =getattr(A_ , """handle_key""" , [] )
handle += [key]
setattr(A_ , """handle_key""" , A_ )
return func
return decorator
def lowercase__ ( *A_: List[str] ) -> Optional[int]:
"""simple docstring"""
def decorator(A_: Tuple ):
__UpperCAmelCase =getattr(A_ , """handle_key""" , [] )
handle += keys
setattr(A_ , """handle_key""" , A_ )
return func
return decorator
class _A ( UpperCamelCase ):
"""simple docstring"""
def __new__( cls : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> int:
__UpperCAmelCase =super().__new__(cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if not hasattr(__SCREAMING_SNAKE_CASE , """key_handler""" ):
setattr(__SCREAMING_SNAKE_CASE , """key_handler""" , {} )
setattr(__SCREAMING_SNAKE_CASE , """handle_input""" , KeyHandler.handle_input )
for value in attrs.values():
__UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , """handle_key""" , [] )
for key in handled_keys:
__UpperCAmelCase =value
return new_cls
@staticmethod
def _a ( cls : Dict ) -> List[Any]:
__UpperCAmelCase =get_character()
if char != KEYMAP["undefined"]:
__UpperCAmelCase =ord(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =cls.key_handler.get(__SCREAMING_SNAKE_CASE )
if handler:
__UpperCAmelCase =char
return handler(cls )
else:
return None
def lowercase__ ( cls: str ) -> int:
"""simple docstring"""
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
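# --- Added usage note (not part of the original file) -------------------------
# In accelerate this machinery drives keyboard menus roughly as follows (real
# names assumed; above, the decorators and metaclass are obfuscated):
#   class Menu(metaclass=KeyHandler):
#       @mark("down")
#       def move_down(self):
#           ...
#   Menu.handle_input()  # reads one key and dispatches to the marked handler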
| 68 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
__A = TypeVar("T")
def lowercase__ ( A_: int ) -> int:
"""simple docstring"""
return (position - 1) // 2
def lowercase__ ( A_: int ) -> int:
"""simple docstring"""
return (2 * position) + 1
def lowercase__ ( A_: int ) -> int:
"""simple docstring"""
return (2 * position) + 2
class _A ( Generic[T] ):
"""simple docstring"""
def __init__( self : List[str] ) -> None:
__UpperCAmelCase =[]
__UpperCAmelCase ={}
__UpperCAmelCase =0
def __len__( self : str ) -> int:
return self.elements
def __repr__( self : Dict ) -> str:
return str(self.heap )
def _a ( self : Optional[int] ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
__UpperCAmelCase =self.elements
self.elements += 1
self._bubble_up(__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[int] ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
__UpperCAmelCase , __UpperCAmelCase =self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__UpperCAmelCase , __UpperCAmelCase =self.heap[0]
self._bubble_down(__SCREAMING_SNAKE_CASE )
return elem
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
# Update the weight of the given key
__UpperCAmelCase =self.position_map[elem]
__UpperCAmelCase =(elem, weight)
if position > 0:
__UpperCAmelCase =get_parent_position(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase =self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(__SCREAMING_SNAKE_CASE )
else:
self._bubble_down(__SCREAMING_SNAKE_CASE )
else:
self._bubble_down(__SCREAMING_SNAKE_CASE )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : T ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
__UpperCAmelCase =self.position_map[elem]
if curr_pos == 0:
return None
__UpperCAmelCase =get_parent_position(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase =self.heap[curr_pos]
__UpperCAmelCase , __UpperCAmelCase =self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_up(__SCREAMING_SNAKE_CASE )
return None
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
__UpperCAmelCase =self.position_map[elem]
__UpperCAmelCase , __UpperCAmelCase =self.heap[curr_pos]
__UpperCAmelCase =get_child_left_position(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =get_child_right_position(__SCREAMING_SNAKE_CASE )
if child_left_position < self.elements and child_right_position < self.elements:
__UpperCAmelCase , __UpperCAmelCase =self.heap[child_left_position]
__UpperCAmelCase , __UpperCAmelCase =self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_down(__SCREAMING_SNAKE_CASE )
if child_left_position < self.elements:
__UpperCAmelCase , __UpperCAmelCase =self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_down(__SCREAMING_SNAKE_CASE )
else:
return None
if child_right_position < self.elements:
__UpperCAmelCase , __UpperCAmelCase =self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_down(__SCREAMING_SNAKE_CASE )
return None
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> None:
# Swap the nodes at the given positions
__UpperCAmelCase =self.heap[nodea_pos][0]
__UpperCAmelCase =self.heap[nodea_pos][0]
__UpperCAmelCase , __UpperCAmelCase =(
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__UpperCAmelCase =nodea_pos
__UpperCAmelCase =nodea_pos
class _A ( Generic[T] ):
"""simple docstring"""
def __init__( self : List[Any] ) -> None:
__UpperCAmelCase ={}
__UpperCAmelCase =0
def __repr__( self : Tuple ) -> str:
return str(self.connections )
def __len__( self : str ) -> int:
return self.nodes
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : T ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
__UpperCAmelCase ={}
self.nodes += 1
def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(__SCREAMING_SNAKE_CASE )
self.add_node(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =weight
__UpperCAmelCase =weight
def lowercase__ ( A_: GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
"""simple docstring"""
__UpperCAmelCase ={node: maxsize for node in graph.connections}
__UpperCAmelCase ={node: None for node in graph.connections}
__UpperCAmelCase =MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(node , weight )
if priority_queue.is_empty():
return dist, parent
# initialization
__UpperCAmelCase =priority_queue.extract_min()
__UpperCAmelCase =0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__UpperCAmelCase =dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(neighbour , dist[neighbour] )
__UpperCAmelCase =node
# running prim's algorithm
while not priority_queue.is_empty():
__UpperCAmelCase =priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__UpperCAmelCase =dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(neighbour , dist[neighbour] )
__UpperCAmelCase =node
return dist, parent
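# --- Added usage sketch (not part of the original file) -----------------------
# A self-contained Prim's MST over the same adjacency-dict shape, using heapq
# with lazy deletion instead of the hand-rolled MinPriorityQueue above. Note
# one difference: the routine above relaxes with dist[node] + weight
# (cumulative, Dijkstra-style), while textbook Prim's compares the single
# edge weight, as below.
import heapq

def prims_mst(connections: dict) -> tuple:
    start = next(iter(connections))
    dist = {node: float("inf") for node in connections}
    parent = {node: None for node in connections}
    dist[start] = 0
    heap = [(0, start)]
    seen = set()
    while heap:
        _, node = heapq.heappop(heap)
        if node in seen:
            continue  # stale entry left behind by a later relaxation
        seen.add(node)
        for neighbour, weight in connections[node].items():
            if neighbour not in seen and weight < dist[neighbour]:
                dist[neighbour] = weight
                parent[neighbour] = node
                heapq.heappush(heap, (weight, neighbour))
    return dist, parent

_g = {"a": {"b": 3, "c": 15}, "b": {"a": 3, "c": 10}, "c": {"a": 15, "b": 10}}
assert prims_mst(_g)[1] == {"a": None, "b": "a", "c": "b"}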
| 68 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _A ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Dict = StableUnCLIPImgaImgPipeline
lowerCamelCase : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowerCamelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCamelCase : Union[str, Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCamelCase : List[Any] = frozenset([] )
def _a ( self : Any ) -> Optional[int]:
__UpperCAmelCase =32
__UpperCAmelCase =embedder_hidden_size
# image encoding components
__UpperCAmelCase =CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
__UpperCAmelCase =CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=__SCREAMING_SNAKE_CASE , projection_dim=__SCREAMING_SNAKE_CASE , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
__UpperCAmelCase =StableUnCLIPImageNormalizer(embedding_dim=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
__UpperCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
__UpperCAmelCase =CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__SCREAMING_SNAKE_CASE , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
__UpperCAmelCase =UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__SCREAMING_SNAKE_CASE , layers_per_block=1 , upcast_attention=__SCREAMING_SNAKE_CASE , use_linear_projection=__SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
__UpperCAmelCase =DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__SCREAMING_SNAKE_CASE , steps_offset=1 , )
torch.manual_seed(0 )
__UpperCAmelCase =AutoencoderKL()
__UpperCAmelCase ={
# image encoding components
"""feature_extractor""": feature_extractor,
"""image_encoder""": image_encoder.eval(),
# image noising components
"""image_normalizer""": image_normalizer.eval(),
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder.eval(),
"""unet""": unet.eval(),
"""scheduler""": scheduler,
"""vae""": vae.eval(),
}
return components
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=0 , __SCREAMING_SNAKE_CASE : Optional[Any]=True ) -> Dict:
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__UpperCAmelCase =torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__UpperCAmelCase =torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
if pil_image:
__UpperCAmelCase =input_image * 0.5 + 0.5
__UpperCAmelCase =input_image.clamp(0 , 1 )
__UpperCAmelCase =input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__UpperCAmelCase =DiffusionPipeline.numpy_to_pil(__SCREAMING_SNAKE_CASE )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def _a ( self : Tuple ) -> str:
__UpperCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase =self.get_dummy_components()
__UpperCAmelCase =StableUnCLIPImgaImgPipeline(**__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
inputs.update({"""image_embeds""": None} )
__UpperCAmelCase =sd_pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase =np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : int ) -> str:
__UpperCAmelCase =torch_device in ["""cpu""", """mps"""]
self._test_attention_slicing_forward_pass(test_max_difference=__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[Any] ) -> Any:
__UpperCAmelCase =torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=__SCREAMING_SNAKE_CASE )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _a ( self : List[Any] ) -> Tuple:
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__SCREAMING_SNAKE_CASE )
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : str ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Tuple ) -> List[str]:
__UpperCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
__UpperCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""" )
__UpperCAmelCase =StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-l-img2img""" , torch_dtype=torch.floataa )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase =torch.Generator(device="""cpu""" ).manual_seed(0 )
__UpperCAmelCase =pipe(__SCREAMING_SNAKE_CASE , """anime turtle""" , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" )
__UpperCAmelCase =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _a ( self : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
__UpperCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""" )
__UpperCAmelCase =StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase =torch.Generator(device="""cpu""" ).manual_seed(0 )
__UpperCAmelCase =pipe(__SCREAMING_SNAKE_CASE , """anime turtle""" , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" )
__UpperCAmelCase =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _a ( self : Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase =StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
__UpperCAmelCase =pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase =pipe(
__SCREAMING_SNAKE_CASE , """anime turtle""" , num_inference_steps=2 , output_type="""np""" , )
__UpperCAmelCase =torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 68 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
__A = logging.get_logger(__name__)
@dataclass
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self : Any , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
__UpperCAmelCase =deprecated_arg[3:]
__UpperCAmelCase =not kwargs.pop(__SCREAMING_SNAKE_CASE )
logger.warning(
f'''{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'''
f''' {positive_arg}={kwargs[positive_arg]}''' )
__UpperCAmelCase =kwargs.pop("""tpu_name""" , self.tpu_name )
__UpperCAmelCase =kwargs.pop("""device_idx""" , self.device_idx )
__UpperCAmelCase =kwargs.pop("""eager_mode""" , self.eager_mode )
__UpperCAmelCase =kwargs.pop("""use_xla""" , self.use_xla )
super().__init__(**__SCREAMING_SNAKE_CASE )
lowerCamelCase : str = field(
default=UpperCamelCase , metadata={'help': 'Name of TPU'} , )
lowerCamelCase : int = field(
default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , )
lowerCamelCase : bool = field(default=UpperCamelCase , metadata={'help': 'Benchmark models in eager model.'} )
lowerCamelCase : bool = field(
default=UpperCamelCase , metadata={
'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'
} , )
@cached_property
def _a ( self : List[str] ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ["""tf"""] )
__UpperCAmelCase =None
if self.tpu:
try:
if self.tpu_name:
__UpperCAmelCase =tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
__UpperCAmelCase =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
__UpperCAmelCase =None
return tpu
@cached_property
def _a ( self : Tuple ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ["""tf"""] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
__UpperCAmelCase =tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
__UpperCAmelCase =tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' )
else:
tf.config.set_visible_devices([] , """GPU""" ) # disable GPU
__UpperCAmelCase =tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' )
return strategy
@property
def _a ( self : Optional[Any] ) -> bool:
requires_backends(self , ["""tf"""] )
return self._setup_tpu is not None
@property
def _a ( self : str ) -> "tf.distribute.Strategy":
requires_backends(self , ["""tf"""] )
return self._setup_strategy
@property
def _a ( self : Dict ) -> Optional[int]:
requires_backends(self , ["""tf"""] )
return tf.config.list_physical_devices("""GPU""" )
@property
def _a ( self : List[str] ) -> int:
requires_backends(self , ["""tf"""] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def _a ( self : List[str] ) -> bool:
return self.n_gpu > 0
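# --- Added usage note (not part of the original file) -------------------------
# Real-name counterpart (assumed): TensorFlowBenchmarkArguments. The __init__
# above inverts deprecated no_* flags into their positive form, e.g.:
#   args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], no_cuda=True)
#   # logs a deprecation warning and ends up with args.cuda == False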
| 68 | 1 |
import argparse
from collections import defaultdict
import yaml
__A = "docs/source/en/_toctree.yml"
def lowercase__ ( A_: Tuple ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase =defaultdict(A_ )
for doc in model_doc:
counts[doc["local"]] += 1
__UpperCAmelCase =[key for key, value in counts.items() if value > 1]
__UpperCAmelCase =[]
for duplicate_key in duplicates:
__UpperCAmelCase =list({doc["""title"""] for doc in model_doc if doc["""local"""] == duplicate_key} )
if len(A_ ) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc["""local"""]] == 1] )
# Sort
return sorted(A_ , key=lambda s : s["title"].lower() )
def lowercase__ ( A_: Optional[int]=False ) -> Tuple:
"""simple docstring"""
with open(A_ , encoding="""utf-8""" ) as f:
__UpperCAmelCase =yaml.safe_load(f.read() )
# Get to the API doc
__UpperCAmelCase =0
while content[api_idx]["title"] != "API":
api_idx += 1
__UpperCAmelCase =content[api_idx]["""sections"""]
# Then to the model doc
__UpperCAmelCase =0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
__UpperCAmelCase =api_doc[model_idx]["""sections"""]
__UpperCAmelCase =[(idx, section) for idx, section in enumerate(A_ ) if """sections""" in section]
__UpperCAmelCase =False
for idx, modality_doc in modalities_docs:
__UpperCAmelCase =modality_doc["""sections"""]
__UpperCAmelCase =clean_model_doc_toc(A_ )
if old_modality_doc != new_modality_doc:
__UpperCAmelCase =True
if overwrite:
__UpperCAmelCase =new_modality_doc
if diff:
if overwrite:
__UpperCAmelCase =model_doc
__UpperCAmelCase =api_doc
with open(A_ , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(A_ , allow_unicode=A_ ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__A = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
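# --- Added usage note (not part of the original file) -------------------------
# The dedup helper keeps one entry per `local` key (raising ValueError when
# duplicate keys carry different titles) and sorts by title, so for example
#   [{"local": "bert", "title": "BERT"}, {"local": "bert", "title": "BERT"}]
# collapses to a single {"local": "bert", "title": "BERT"} entry.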
| 68 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _A ( unittest.TestCase ):
"""simple docstring"""
@property
def _a ( self : List[str] ) -> Dict:
torch.manual_seed(0 )
__UpperCAmelCase =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def _a ( self : int ) -> Union[str, Any]:
__UpperCAmelCase =self.dummy_uncond_unet
__UpperCAmelCase =ScoreSdeVeScheduler()
__UpperCAmelCase =ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
sde_ve.to(__SCREAMING_SNAKE_CASE )
sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =torch.manual_seed(0 )
__UpperCAmelCase =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =torch.manual_seed(0 )
__UpperCAmelCase =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )[
0
]
__UpperCAmelCase =image[0, -3:, -3:, -1]
__UpperCAmelCase =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[int] ) -> int:
__UpperCAmelCase ="""google/ncsnpp-church-256"""
__UpperCAmelCase =UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =ScoreSdeVeScheduler.from_pretrained(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
sde_ve.to(__SCREAMING_SNAKE_CASE )
sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =torch.manual_seed(0 )
__UpperCAmelCase =sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__UpperCAmelCase =np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 68 | 1 |
import math
import os
import sys
def lowercase__ ( A_: str ) -> str:
"""simple docstring"""
__UpperCAmelCase =""""""
try:
with open(A_ , """rb""" ) as binary_file:
__UpperCAmelCase =binary_file.read()
for dat in data:
__UpperCAmelCase =F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def lowercase__ ( A_: dict[str, str] , A_: str , A_: int , A_: str ) -> None:
"""simple docstring"""
lexicon.pop(curr_string )
__UpperCAmelCase =last_match_id
if math.log2(index ).is_integer():
for curr_key in lexicon:
__UpperCAmelCase ="""0""" + lexicon[curr_key]
__UpperCAmelCase =bin(index )[2:]
def lowercase__ ( A_: str ) -> str:
"""simple docstring"""
__UpperCAmelCase ={"""0""": """0""", """1""": """1"""}
__UpperCAmelCase , __UpperCAmelCase ="""""", """"""
__UpperCAmelCase =len(A_ )
for i in range(len(A_ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__UpperCAmelCase =lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(lexicon , curr_string , index , last_match_id )
index += 1
__UpperCAmelCase =""""""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
__UpperCAmelCase =lexicon[curr_string]
result += last_match_id
return result
def lowercase__ ( A_: str , A_: str ) -> str:
"""simple docstring"""
__UpperCAmelCase =os.path.getsize(A_ )
__UpperCAmelCase =bin(A_ )[2:]
__UpperCAmelCase =len(A_ )
return "0" * (length_length - 1) + file_length_binary + compressed
def lowercase__ ( A_: str , A_: str ) -> None:
"""simple docstring"""
__UpperCAmelCase =8
try:
with open(A_ , """wb""" ) as opened_file:
__UpperCAmelCase =[
to_write[i : i + byte_length]
for i in range(0 , len(A_ ) , A_ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(A_ , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def lowercase__ ( A_: str , A_: str ) -> None:
"""simple docstring"""
__UpperCAmelCase =read_file_binary(A_ )
__UpperCAmelCase =compress_data(A_ )
__UpperCAmelCase =add_file_length(A_ , A_ )
write_file_binary(A_ , A_ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
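# --- Added usage note (not part of the original file) -------------------------
# Example invocation; the script and file names are placeholders:
#   python lempel_ziv.py source.bin destination.lzw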
| 68 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__A = logging.get_logger(__name__)
__A = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class _A ( UpperCamelCase ):
"""simple docstring"""
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=None , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if config is None:
assert isinstance(self.model , __SCREAMING_SNAKE_CASE ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f''' {self.model.__class__}'''
)
__UpperCAmelCase =self.model.config
else:
__UpperCAmelCase =config
__UpperCAmelCase =data_args
__UpperCAmelCase =self.config.tgt_vocab_size if isinstance(self.config , __SCREAMING_SNAKE_CASE ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
__UpperCAmelCase =torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
__UpperCAmelCase =label_smoothed_nll_loss
def _a ( self : Any , __SCREAMING_SNAKE_CASE : int ) -> Any:
if self.optimizer is None:
__UpperCAmelCase =["""bias""", """LayerNorm.weight"""]
__UpperCAmelCase =[
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
__UpperCAmelCase =Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
__UpperCAmelCase =Adafactor
__UpperCAmelCase ={"""scale_parameter""": False, """relative_step""": False}
else:
__UpperCAmelCase =AdamW
__UpperCAmelCase ={
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
__UpperCAmelCase =self.args.learning_rate
if self.sharded_ddp:
__UpperCAmelCase =OSS(
params=__SCREAMING_SNAKE_CASE , optim=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
else:
__UpperCAmelCase =optimizer_cls(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if self.lr_scheduler is None:
__UpperCAmelCase =self._get_lr_scheduler(__SCREAMING_SNAKE_CASE )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
__UpperCAmelCase =arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
__UpperCAmelCase =schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
__UpperCAmelCase =schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
__UpperCAmelCase =schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__SCREAMING_SNAKE_CASE )
return scheduler
def _a ( self : Optional[Any] ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
__UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[0]
__UpperCAmelCase =self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
__UpperCAmelCase , __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[:2]
else:
# compute label smoothed loss
__UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[0]
__UpperCAmelCase =torch.nn.functional.log_softmax(__SCREAMING_SNAKE_CASE , dim=-1 )
__UpperCAmelCase , __UpperCAmelCase =self.loss_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
__UpperCAmelCase =inputs.pop("""labels""" )
__UpperCAmelCase , __UpperCAmelCase =self._compute_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return loss
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : nn.Module , __SCREAMING_SNAKE_CASE : Dict[str, Union[torch.Tensor, Any]] , __SCREAMING_SNAKE_CASE : bool , __SCREAMING_SNAKE_CASE : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
__UpperCAmelCase =self._prepare_inputs(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ={
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
__UpperCAmelCase =self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__SCREAMING_SNAKE_CASE , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
__UpperCAmelCase =self._pad_tensors_to_max_len(__SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] )
__UpperCAmelCase =inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
__UpperCAmelCase , __UpperCAmelCase =self._compute_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
__UpperCAmelCase =generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
__UpperCAmelCase =self._pad_tensors_to_max_len(__SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int ) -> List[Any]:
# If PAD token is not defined at least EOS token has to be defined
__UpperCAmelCase =self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
f''' padded to `max_length`={max_length}''' )
__UpperCAmelCase =pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
__UpperCAmelCase =tensor
return padded_tensor
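# --- Added sketch (not part of the original file) ------------------------------
# When args.label_smoothing > 0 the trainer mixes the gold-token NLL with a
# uniform penalty over the vocabulary. A minimal dense version of that loss
# (ignoring the padding mask the real `label_smoothed_nll_loss` applies):
def _label_smoothed_nll(log_probs, target, eps):
    nll = -log_probs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
    smooth = -log_probs.mean(dim=-1)  # uniform component over the vocab
    return ((1.0 - eps) * nll + eps * smooth).mean()

_lp = torch.log_softmax(torch.randn(4, 10), dim=-1)  # torch imported above
_loss = _label_smoothed_nll(_lp, torch.tensor([1, 2, 3, 4]), eps=0.1)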
| 68 | 1 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__A = 25_00_04
__A = 25_00_20
@require_sentencepiece
@require_tokenizers
class _A ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Tuple = MBartTokenizer
lowerCamelCase : List[str] = MBartTokenizerFast
lowerCamelCase : int = True
lowerCamelCase : List[str] = True
def _a ( self : List[Any] ) -> Union[str, Any]:
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase =MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def _a ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase =MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__UpperCAmelCase =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__UpperCAmelCase =tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
__UpperCAmelCase =tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def _a ( self : Optional[Any] ) -> Optional[Any]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__UpperCAmelCase =(self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase =self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =tempfile.mkdtemp()
__UpperCAmelCase =tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
__UpperCAmelCase =tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
__UpperCAmelCase =tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
__UpperCAmelCase =tempfile.mkdtemp()
__UpperCAmelCase =tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
__UpperCAmelCase =tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
__UpperCAmelCase =tempfile.mkdtemp()
__UpperCAmelCase =tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__UpperCAmelCase =tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
@require_torch
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : List[str] = 'facebook/mbart-large-en-ro'
lowerCamelCase : Tuple = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
lowerCamelCase : List[str] = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
lowerCamelCase : str = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def _a ( cls : int ) -> Union[str, Any]:
__UpperCAmelCase =MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
__UpperCAmelCase =1
return cls
def _a ( self : Union[str, Any] ) -> int:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 )
def _a ( self : Dict ) -> str:
__UpperCAmelCase =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
def _a ( self : Optional[Any] ) -> Any:
self.assertIn(__SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids )
__UpperCAmelCase =[RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
__UpperCAmelCase =self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertNotIn(self.tokenizer.eos_token , __SCREAMING_SNAKE_CASE )
def _a ( self : List[str] ) -> Tuple:
__UpperCAmelCase =["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =10
__UpperCAmelCase =self.tokenizer(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __SCREAMING_SNAKE_CASE )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def _a ( self : int ) -> int:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250026, 250001] )
def _a ( self : Optional[Any] ) -> Optional[int]:
__UpperCAmelCase =tempfile.mkdtemp()
__UpperCAmelCase =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =MBartTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __SCREAMING_SNAKE_CASE )
@require_torch
def _a ( self : Optional[int] ) -> Any:
__UpperCAmelCase =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
__UpperCAmelCase =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def _a ( self : Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
__UpperCAmelCase =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__UpperCAmelCase =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def _a ( self : Optional[Any] ) -> Dict:
__UpperCAmelCase =self.tokenizer(self.src_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=3 , return_tensors="""pt""" )
__UpperCAmelCase =self.tokenizer(
text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=10 , return_tensors="""pt""" )
__UpperCAmelCase =targets["""input_ids"""]
__UpperCAmelCase =shift_tokens_right(__SCREAMING_SNAKE_CASE , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _a ( self : int ) -> Any:
__UpperCAmelCase =self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , {
# A, test, EOS, en_XX
"""input_ids""": [[62, 3034, 2, 250004]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250001,
} , )
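    # Added note (sketch): shift_tokens_right rotates the target language code
    # from the end of `labels` to the front of `decoder_input_ids`, e.g.
    # labels [..., eos, ro_RO] -> decoder_input_ids [ro_RO, ..., eos], which is
    # what the assertions on decoder_input_ids in the tests above rely on.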
| 68 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _A ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : List[Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str=0 ) -> Any:
__UpperCAmelCase =floats_tensor((1, 3, 128, 128) , rng=random.Random(__SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =np.random.RandomState(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ={
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""strength""": 0.75,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self : Optional[Any] ) -> int:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _a ( self : Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : Optional[Any] ) -> Dict:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
# warmup pass to apply optimizations
__UpperCAmelCase =pipe(**self.get_dummy_inputs() )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : List[Any] ) -> List[str]:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : Union[str, Any] ) -> Dict:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
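    # Added note (sketch): the scheduler tests above all follow one pattern; an
    # equivalent compact form, assuming the usual diffusers idiom of swapping
    # the scheduler in place:
    #   for scheduler_cls in (LMSDiscreteScheduler, EulerDiscreteScheduler):
    #       pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
    #       image = pipe(**self.get_dummy_inputs()).images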
@nightly
@require_onnxruntime
@require_torch_gpu
class _A ( unittest.TestCase ):
"""simple docstring"""
@property
def _a ( self : List[str] ) -> Optional[int]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _a ( self : Dict ) -> int:
__UpperCAmelCase =ort.SessionOptions()
__UpperCAmelCase =False
return options
def _a ( self : Dict ) -> Any:
__UpperCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__UpperCAmelCase =init_image.resize((768, 512) )
# using the PNDM scheduler by default
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ="""A fantasy landscape, trending on artstation"""
__UpperCAmelCase =np.random.RandomState(0 )
__UpperCAmelCase =pipe(
prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , )
__UpperCAmelCase =output.images
__UpperCAmelCase =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__UpperCAmelCase =np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _a ( self : List[str] ) -> str:
__UpperCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__UpperCAmelCase =init_image.resize((768, 512) )
__UpperCAmelCase =LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ="""A fantasy landscape, trending on artstation"""
__UpperCAmelCase =np.random.RandomState(0 )
__UpperCAmelCase =pipe(
prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , )
__UpperCAmelCase =output.images
__UpperCAmelCase =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__UpperCAmelCase =np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 68 | 1 |
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1
def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Return the pressure of an ideal gas, P = nRT / V."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Return the volume of an ideal gas, V = nRT / P."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
    from doctest import testmod
    testmod()
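    # Added usage sketch: 2 mol of an ideal gas at 300 K in a 0.05 m^3 vessel,
    # P = nRT / V = 2 * 300 * 8.314462 / 0.05.
    print(round(pressure_of_gas_system(2.0, 300.0, 0.05), 1))  # 99773.5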
| 68 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__A = logging.getLogger(__name__)
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = 'sequence-classification'
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Tuple ) -> Optional[Any]:
if type(__SCREAMING_SNAKE_CASE ) == dict:
__UpperCAmelCase =Namespace(**__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =glue_output_modes[hparams.task]
__UpperCAmelCase =glue_tasks_num_labels[hparams.task]
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.mode )
def _a ( self : str , **__SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
return self.model(**__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict ) -> List[Any]:
__UpperCAmelCase ={"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__UpperCAmelCase =batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
__UpperCAmelCase =self(**__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =outputs[0]
__UpperCAmelCase =self.trainer.lr_schedulers[0]["""scheduler"""]
__UpperCAmelCase ={"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _a ( self : Tuple ) -> List[Any]:
__UpperCAmelCase =self.hparams
__UpperCAmelCase =processors[args.task]()
__UpperCAmelCase =processor.get_labels()
for mode in ["train", "dev"]:
__UpperCAmelCase =self._feature_file(__SCREAMING_SNAKE_CASE )
if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , __SCREAMING_SNAKE_CASE )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
__UpperCAmelCase =(
processor.get_dev_examples(args.data_dir )
if mode == """dev"""
else processor.get_train_examples(args.data_dir )
)
__UpperCAmelCase =convert_examples_to_features(
__SCREAMING_SNAKE_CASE , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("""Saving features into cached file %s""" , __SCREAMING_SNAKE_CASE )
torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : bool = False ) -> DataLoader:
__UpperCAmelCase ="""dev""" if mode == """test""" else mode
__UpperCAmelCase =self._feature_file(__SCREAMING_SNAKE_CASE )
logger.info("""Loading features from cached file %s""" , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =torch.load(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =torch.tensor([f.input_ids for f in features] , dtype=torch.long )
__UpperCAmelCase =torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
__UpperCAmelCase =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
__UpperCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
__UpperCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , batch_size=__SCREAMING_SNAKE_CASE , shuffle=__SCREAMING_SNAKE_CASE , )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int ) -> str:
__UpperCAmelCase ={"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__UpperCAmelCase =batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
__UpperCAmelCase =self(**__SCREAMING_SNAKE_CASE )
        tmp_eval_loss , logits =outputs[:2]
__UpperCAmelCase =logits.detach().cpu().numpy()
__UpperCAmelCase =inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Any ) -> tuple:
__UpperCAmelCase =torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
__UpperCAmelCase =np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
__UpperCAmelCase =np.argmax(__SCREAMING_SNAKE_CASE , axis=1 )
elif self.hparams.glue_output_mode == "regression":
__UpperCAmelCase =np.squeeze(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =np.concatenate([x["""target"""] for x in outputs] , axis=0 )
__UpperCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
__UpperCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
__UpperCAmelCase ={**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}
__UpperCAmelCase =dict(results.items() )
__UpperCAmelCase =results
return ret, preds_list, out_label_list
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : list ) -> dict:
        ret , preds , targets =self._eval_end(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> dict:
        ret , preds , targets =self._eval_end(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _a ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]:
BaseTransformer.add_model_specific_args(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=__SCREAMING_SNAKE_CASE , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--task""" , default="""""" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The GLUE task to run""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=__SCREAMING_SNAKE_CASE , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
def lowercase__ ( ) -> str:
"""simple docstring"""
__UpperCAmelCase =argparse.ArgumentParser()
add_generic_args(A_ , os.getcwd() )
__UpperCAmelCase =GLUETransformer.add_model_specific_args(A_ , os.getcwd() )
__UpperCAmelCase =parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
__UpperCAmelCase =os.path.join(
"""./results""" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
os.makedirs(args.output_dir )
__UpperCAmelCase =GLUETransformer(A_ )
__UpperCAmelCase =generic_train(A_ , A_ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
__UpperCAmelCase =sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=A_ ) )
__UpperCAmelCase =model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(A_ )
if __name__ == "__main__":
main()
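    # Added usage sketch (only flags defined above; generic flags such as the
    # model name come from add_generic_args/BaseTransformer, and the script
    # name is illustrative):
    #   python glue_trainer.py --task mrpc --max_seq_length 128 --gpus 1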
| 68 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__A = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
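    # Added note (sketch): replacing the module in sys.modules with _LazyModule
    # defers the heavy torch-backed imports above until an attribute such as
    # FNetModel is first accessed.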
| 68 |
def xnor_gate(input_1: int, input_2: int) -> int:
    """Return 1 when both inputs agree (XNOR), else 0."""
    return 1 if input_1 == input_2 else 0
def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
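    # Added sketch: XNOR is the complement of XOR, so xnor_gate(a, b) must
    # equal 1 - (a ^ b) over the whole truth table.
    for a in (0, 1):
        for b in (0, 1):
            assert xnor_gate(a, b) == 1 - (a ^ b)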
| 68 | 1 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__A = logging.get_logger("transformers.models.encodec")
__A = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
__A = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
__A = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
__A = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
__A = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
__A = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__A = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__A = []
__A = []
def lowercase__ ( A_: str , A_: str , A_: List[str] , A_: List[str] , A_: Union[str, Any] ) -> str:
"""simple docstring"""
for attribute in key.split(""".""" ):
__UpperCAmelCase =getattr(A_ , A_ )
if weight_type is not None:
__UpperCAmelCase =getattr(A_ , A_ ).shape
else:
__UpperCAmelCase =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
__UpperCAmelCase =value
elif weight_type == "weight_g":
__UpperCAmelCase =value
elif weight_type == "weight_v":
__UpperCAmelCase =value
elif weight_type == "bias":
__UpperCAmelCase =value
elif weight_type == "running_mean":
__UpperCAmelCase =value
elif weight_type == "running_var":
__UpperCAmelCase =value
elif weight_type == "num_batches_tracked":
__UpperCAmelCase =value
elif weight_type == "weight_ih_l0":
__UpperCAmelCase =value
elif weight_type == "weight_hh_l0":
__UpperCAmelCase =value
elif weight_type == "bias_ih_l0":
__UpperCAmelCase =value
elif weight_type == "bias_hh_l0":
__UpperCAmelCase =value
elif weight_type == "weight_ih_l1":
__UpperCAmelCase =value
elif weight_type == "weight_hh_l1":
__UpperCAmelCase =value
elif weight_type == "bias_ih_l1":
__UpperCAmelCase =value
elif weight_type == "bias_hh_l1":
__UpperCAmelCase =value
else:
__UpperCAmelCase =value
logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def lowercase__ ( A_: Any , A_: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
            prefix , suffix =key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
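# Added sketch (illustrative helper, not called by the script): how a ".*."
# wildcard key is matched against a concrete weight name and resolved into the
# target key. Assumes the layer index sits right after the key's prefix.
def _resolve_wildcard_example(key: str, name: str, mapped_key: str) -> str:
    prefix, suffix = key.split(".*.")
    assert prefix in name and suffix in name
    layer_index = name.split(prefix + ".")[1].split(".")[0]  # e.g. "2"
    return mapped_key.replace("*", layer_index)
# _resolve_wildcard_example(
#     "quantizer.vq.layers.*._codebook.embed",
#     "quantizer.vq.layers.2._codebook.embed",
#     "quantizer.layers.*.codebook.embed",
# ) -> "quantizer.layers.2.codebook.embed"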
def lowercase__ ( A_: Optional[Any] , A_: int , A_: Any ) -> Dict:
"""simple docstring"""
__UpperCAmelCase =[]
if model_name == "encodec_24khz" or "encodec_32khz":
__UpperCAmelCase =MAPPING_24K
elif model_name == "encodec_48khz":
__UpperCAmelCase =MAPPING_48K
else:
raise ValueError(F'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(A_ , A_ ):
logger.info(F'''{name} was ignored''' )
continue
__UpperCAmelCase =False
for key, mapped_key in MAPPING.items():
if "*" in key:
                prefix , suffix =key.split(""".*.""" )
if prefix in name and suffix in name:
__UpperCAmelCase =suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
continue
__UpperCAmelCase =True
if "*" in mapped_key:
__UpperCAmelCase =name.split(A_ )[0].split(""".""" )[-2]
__UpperCAmelCase =mapped_key.replace("""*""" , A_ )
if "weight_g" in name:
__UpperCAmelCase ="""weight_g"""
elif "weight_v" in name:
__UpperCAmelCase ="""weight_v"""
elif "weight_ih_l0" in name:
__UpperCAmelCase ="""weight_ih_l0"""
elif "weight_hh_l0" in name:
__UpperCAmelCase ="""weight_hh_l0"""
elif "bias_ih_l0" in name:
__UpperCAmelCase ="""bias_ih_l0"""
elif "bias_hh_l0" in name:
__UpperCAmelCase ="""bias_hh_l0"""
elif "weight_ih_l1" in name:
__UpperCAmelCase ="""weight_ih_l1"""
elif "weight_hh_l1" in name:
__UpperCAmelCase ="""weight_hh_l1"""
elif "bias_ih_l1" in name:
__UpperCAmelCase ="""bias_ih_l1"""
elif "bias_hh_l1" in name:
__UpperCAmelCase ="""bias_hh_l1"""
elif "bias" in name:
__UpperCAmelCase ="""bias"""
elif "weight" in name:
__UpperCAmelCase ="""weight"""
elif "running_mean" in name:
__UpperCAmelCase ="""running_mean"""
elif "running_var" in name:
__UpperCAmelCase ="""running_var"""
elif "num_batches_tracked" in name:
__UpperCAmelCase ="""num_batches_tracked"""
else:
__UpperCAmelCase =None
set_recursively(A_ , A_ , A_ , A_ , A_ )
continue
if not is_used:
unused_weights.append(A_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def lowercase__ ( A_: str , A_: Dict , A_: Tuple , A_: Optional[int]=None , A_: Any=None , ) -> List[str]:
"""simple docstring"""
if config_path is not None:
__UpperCAmelCase =EncodecConfig.from_pretrained(A_ )
else:
__UpperCAmelCase =EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
__UpperCAmelCase =[8, 5, 4, 4]
__UpperCAmelCase =[2.2]
__UpperCAmelCase =64
__UpperCAmelCase =32000
__UpperCAmelCase =2048
__UpperCAmelCase =False
__UpperCAmelCase =False
__UpperCAmelCase =False
elif model_name == "encodec_48khz":
__UpperCAmelCase =[8, 5, 4, 2]
__UpperCAmelCase =[3.0, 6.0, 1_2.0, 2_4.0]
__UpperCAmelCase =48000
__UpperCAmelCase =2
__UpperCAmelCase =False
__UpperCAmelCase ="""time_group_norm"""
__UpperCAmelCase =True
__UpperCAmelCase =1.0
__UpperCAmelCase =0.0_1
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
__UpperCAmelCase =EncodecModel(A_ )
__UpperCAmelCase =EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(A_ )
__UpperCAmelCase =torch.load(A_ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
__UpperCAmelCase =original_checkpoint["""best_state"""]
recursively_load_weights(A_ , A_ , A_ )
model.save_pretrained(A_ )
if repo_id:
print("""Pushing to the hub...""" )
feature_extractor.push_to_hub(A_ )
model.push_to_hub(A_ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
__A = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 68 |
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locate the leftmost insertion point for item in sorted_collection."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locate the rightmost insertion point for item in sorted_collection."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert item into sorted_collection, before any equal entries."""
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)
def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert item into sorted_collection, after any equal entries."""
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)
def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; returns the index of item or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Binary search via the standard-library bisect module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search on sorted_collection[left:right + 1]."""
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 68 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
    import numpy as np
    import tensorflow as tf
    from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]], dtype=tf.int32
        )  # "J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]], dtype=tf.float32
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 68 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """Mark the function with the key code so it can be handled in the register."""
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func
    return decorator
def mark_multiple(*keys: List[str]):
    """Mark the function with the key codes so it can be handled in the register."""
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func
    return decorator
class KeyHandler(type):
    """Metaclass that collects the marked methods into a key -> handler map."""
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists in the handler."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register(cls):
    """Adds KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
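# Added usage sketch (illustrative class, not part of this module):
#
# @register
# class Menu:
#     @mark("q")
#     def quit(cls):
#         return "quit"
#
# Menu().handle_input() reads one key and dispatches it to the marked method.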
| 68 | 1 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__A = trt.Logger(trt.Logger.WARNING)
__A = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__A = logging.getLogger(__name__)
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=3_84,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=1_28,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
__A = parser.parse_args()
if args.tokenizer_name:
__A = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
logger.info("Training/evaluation parameters %s", args)
__A = args.per_device_eval_batch_size
__A = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__A = True
__A = "temp_engine/bert-fp32.engine"
if args.fpaa:
__A = "temp_engine/bert-fp16.engine"
if args.inta:
__A = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
__A = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__A = [network.get_input(i) for i in range(network.num_inputs)]
__A = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__A = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__A = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__A = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, "wb") as f:
f.write(engine.serialize())
def lowercase__ ( A_: Optional[Any] , A_: List[str] , A_: List[Any] , A_: Optional[Any] , A_: Tuple , A_: Tuple , A_: Tuple , A_: List[str] ) -> Dict:
"""simple docstring"""
__UpperCAmelCase =np.asarray(inputs["""input_ids"""] , dtype=np.intaa )
__UpperCAmelCase =np.asarray(inputs["""attention_mask"""] , dtype=np.intaa )
__UpperCAmelCase =np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , A_ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , A_ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , A_ )
# start time
__UpperCAmelCase =time.time()
# Run inference
context.execute_async(
bindings=[int(A_ ) for d_inp in d_inputs] + [int(A_ ), int(A_ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(A_ , A_ , A_ )
cuda.memcpy_dtoh_async(A_ , A_ , A_ )
# Synchronize the stream and take time
stream.synchronize()
# end time
__UpperCAmelCase =time.time()
__UpperCAmelCase =end_time - start_time
__UpperCAmelCase =(h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
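# Added note (sketch): the host output buffers passed in must be page-locked
# (allocated with cuda.pagelocked_empty, as below) so that the
# memcpy_dtoh_async calls can genuinely overlap with work on the same stream.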
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__A = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__A = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
__A = raw_datasets["validation"].column_names
__A = "question" if "question" in column_names else column_names[0]
__A = "context" if "context" in column_names else column_names[1]
__A = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__A = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
__A = min(args.max_seq_length, tokenizer.model_max_length)
def lowercase__ ( A_: List[str] ) -> List[Any]:
"""simple docstring"""
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possible giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
__UpperCAmelCase =tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=A_ , stride=args.doc_stride , return_overflowing_tokens=A_ , return_offsets_mapping=A_ , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
__UpperCAmelCase =tokenized_examples.pop("""overflow_to_sample_mapping""" )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
__UpperCAmelCase =[]
for i in range(len(tokenized_examples["""input_ids"""] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
__UpperCAmelCase =tokenized_examples.sequence_ids(A_ )
__UpperCAmelCase =1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
__UpperCAmelCase =sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
__UpperCAmelCase =[
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
]
return tokenized_examples
__A = raw_datasets["validation"]
# Validation Feature Creation
__A = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
__A = default_data_collator
__A = eval_dataset.remove_columns(["example_id", "offset_mapping"])
__A = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def lowercase__ ( A_: Union[str, Any] , A_: Optional[Any] , A_: Optional[int] , A_: Union[str, Any]="eval" ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase =postprocess_qa_predictions(
examples=A_ , features=A_ , predictions=A_ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=A_ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
__UpperCAmelCase =[
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
__UpperCAmelCase =[{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
__UpperCAmelCase =[{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=A_ , label_ids=A_ )
__A = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def lowercase__ ( A_: List[Any] ) -> Dict:
"""simple docstring"""
return trt.volume(engine.get_binding_shape(A_ ) ) * engine.get_binding_dtype(A_ ).itemsize
# Allocate device memory for inputs and outputs.
__A = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__A = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__A = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__A = cuda.mem_alloc(h_outputa.nbytes)
__A = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__A = cuda.Stream()
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(F""" Num examples = {len(eval_dataset)}""")
logger.info(F""" Batch size = {args.per_device_eval_batch_size}""")
__A = 0.0
__A = 0
__A = timeit.default_timer()
__A = None
for step, batch in enumerate(eval_dataloader):
        outputs , infer_time = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
        start_logits , end_logits = outputs
__A = torch.tensor(start_logits)
__A = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__A = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_00)
__A = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_00)
__A = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__A = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_00)
if all_preds is not None:
__A = nested_truncate(all_preds, len(eval_dataset))
__A = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("Average Inference Time = {:.3f} ms".format(total_time * 10_00 / niter))
logger.info("Total Inference Time = {:.3f} ms".format(total_time * 10_00))
logger.info("Total Number of Inference = %d", niter)
__A = post_processing_function(eval_examples, eval_dataset, all_preds)
__A = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"""Evaluation metrics: {eval_metric}""")
| 68 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 68 | 1 |
from collections import defaultdict
class AssignmentUsingBitmask:
    """Count the ways to assign each person exactly one distinct task."""
    def __init__(self, task_performed: list[list[int]], total: int) -> None:
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1
    def count_ways_until(self, mask: int, task_no: int) -> int:
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't assign this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]
    def count_no_of_ways(self, task_performed: list[list[int]]) -> int:
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(task_performed))
| 68 |
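For small inputs, the bitmask DP above is easy to cross-check by brute force: enumerate every assignment of distinct tasks to persons and count those where each person receives a task they can perform. A sketch of such a checker, assuming the same input shape as the snippet (task_performed[i] lists the tasks person i can do, tasks numbered 1..total_tasks):
from itertools import permutations

def count_by_brute_force(task_performed, total_tasks):
    persons = len(task_performed)
    count = 0
    # each permutation assigns a distinct task to every person, in order
    for assignment in permutations(range(1, total_tasks + 1), persons):
        if all(task in task_performed[i] for i, task in enumerate(assignment)):
            count += 1
    return count

print(count_by_brute_force([[1, 3, 4], [1, 2, 5], [3, 4]], 5))  # 10, matching the DP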
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class _A ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[Any]=99 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : int=5 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : str=37 , __SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : str=512 , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : List[str]=4 , ) -> Optional[Any]:
__UpperCAmelCase =parent
__UpperCAmelCase =batch_size
__UpperCAmelCase =seq_length
__UpperCAmelCase =is_training
__UpperCAmelCase =use_attention_mask
__UpperCAmelCase =use_token_type_ids
__UpperCAmelCase =use_labels
__UpperCAmelCase =vocab_size
__UpperCAmelCase =hidden_size
__UpperCAmelCase =num_hidden_layers
__UpperCAmelCase =num_attention_heads
__UpperCAmelCase =intermediate_size
__UpperCAmelCase =hidden_act
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =max_position_embeddings
__UpperCAmelCase =type_vocab_size
__UpperCAmelCase =type_sequence_label_size
__UpperCAmelCase =initializer_range
__UpperCAmelCase =num_choices
def _a ( self : List[Any] ) -> List[str]:
__UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase =None
if self.use_attention_mask:
__UpperCAmelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase =None
if self.use_token_type_ids:
__UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase =RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _a ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase =self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs
__UpperCAmelCase ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def _a ( self : List[str] ) -> Dict:
__UpperCAmelCase =self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs
__UpperCAmelCase =True
__UpperCAmelCase =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class _A ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : Union[str, Any] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _a ( self : List[Any] ) -> List[str]:
__UpperCAmelCase =FlaxRobertaModelTester(self )
@slow
def _a ( self : Optional[Any] ) -> List[Any]:
for model_class_name in self.all_model_classes:
__UpperCAmelCase =model_class_name.from_pretrained("""roberta-base""" , from_pt=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model(np.ones((1, 1) ) )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
| 68 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowercase__ ( A_: str , A_: int=0.9_9_9 , A_: List[str]="cosine" , ) -> Tuple:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(A_: Tuple ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(A_: int ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
__UpperCAmelCase =[]
for i in range(A_ ):
__UpperCAmelCase =i / num_diffusion_timesteps
__UpperCAmelCase =(i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(A_ ) / alpha_bar_fn(A_ ) , A_ ) )
return torch.tensor(A_ , dtype=torch.floataa )
class _A ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = [e.name for e in KarrasDiffusionSchedulers]
lowerCamelCase : Dict = 2
@register_to_config
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : int = 1000 , __SCREAMING_SNAKE_CASE : float = 0.00_085 , __SCREAMING_SNAKE_CASE : float = 0.012 , __SCREAMING_SNAKE_CASE : str = "linear" , __SCREAMING_SNAKE_CASE : Optional[Union[np.ndarray, List[float]]] = None , __SCREAMING_SNAKE_CASE : str = "epsilon" , __SCREAMING_SNAKE_CASE : str = "linspace" , __SCREAMING_SNAKE_CASE : int = 0 , ) -> Optional[Any]:
if trained_betas is not None:
__UpperCAmelCase =torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.floataa )
elif beta_schedule == "linear":
__UpperCAmelCase =torch.linspace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__UpperCAmelCase =(
torch.linspace(beta_start**0.5 , beta_end**0.5 , __SCREAMING_SNAKE_CASE , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__UpperCAmelCase =betas_for_alpha_bar(__SCREAMING_SNAKE_CASE )
else:
raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
__UpperCAmelCase =1.0 - self.betas
__UpperCAmelCase =torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str=None ) -> Optional[int]:
if schedule_timesteps is None:
__UpperCAmelCase =self.timesteps
__UpperCAmelCase =(schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__UpperCAmelCase =1 if len(__SCREAMING_SNAKE_CASE ) > 1 else 0
else:
__UpperCAmelCase =timestep.cpu().item() if torch.is_tensor(__SCREAMING_SNAKE_CASE ) else timestep
__UpperCAmelCase =self._index_counter[timestep_int]
return indices[pos].item()
@property
def _a ( self : List[str] ) -> Any:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _a ( self : int , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : Union[float, torch.FloatTensor] , ) -> torch.FloatTensor:
__UpperCAmelCase =self.index_for_timestep(__SCREAMING_SNAKE_CASE )
if self.state_in_first_order:
__UpperCAmelCase =self.sigmas[step_index]
else:
__UpperCAmelCase =self.sigmas_interpol[step_index]
__UpperCAmelCase =sample / ((sigma**2 + 1) ** 0.5)
return sample
def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, torch.device] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , ) -> Optional[Any]:
__UpperCAmelCase =num_inference_steps
__UpperCAmelCase =num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__UpperCAmelCase =np.linspace(0 , num_train_timesteps - 1 , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__UpperCAmelCase =num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__UpperCAmelCase =(np.arange(0 , __SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1].copy().astype(__SCREAMING_SNAKE_CASE )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__UpperCAmelCase =num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__UpperCAmelCase =(np.arange(__SCREAMING_SNAKE_CASE , 0 , -step_ratio )).round().copy().astype(__SCREAMING_SNAKE_CASE )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
__UpperCAmelCase =np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__UpperCAmelCase =torch.from_numpy(np.log(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =np.interp(__SCREAMING_SNAKE_CASE , np.arange(0 , len(__SCREAMING_SNAKE_CASE ) ) , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__UpperCAmelCase =torch.from_numpy(__SCREAMING_SNAKE_CASE ).to(device=__SCREAMING_SNAKE_CASE )
# interpolate sigmas
__UpperCAmelCase =sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__UpperCAmelCase =torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__UpperCAmelCase =torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
# mps does not support float64
__UpperCAmelCase =torch.from_numpy(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE , dtype=torch.floataa )
else:
__UpperCAmelCase =torch.from_numpy(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
# interpolate timesteps
__UpperCAmelCase =self.sigma_to_t(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE , dtype=timesteps.dtype )
__UpperCAmelCase =torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__UpperCAmelCase =torch.cat([timesteps[:1], interleaved_timesteps] )
__UpperCAmelCase =None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__UpperCAmelCase =defaultdict(__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> List[str]:
# get log sigma
__UpperCAmelCase =sigma.log()
# get distribution
__UpperCAmelCase =log_sigma - self.log_sigmas[:, None]
# get sigmas range
__UpperCAmelCase =dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__UpperCAmelCase =low_idx + 1
__UpperCAmelCase =self.log_sigmas[low_idx]
__UpperCAmelCase =self.log_sigmas[high_idx]
# interpolate sigmas
__UpperCAmelCase =(low - log_sigma) / (low - high)
__UpperCAmelCase =w.clamp(0 , 1 )
# transform interpolation to time range
__UpperCAmelCase =(1 - w) * low_idx + w * high_idx
__UpperCAmelCase =t.view(sigma.shape )
return t
@property
def _a ( self : str ) -> Optional[Any]:
return self.sample is None
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, np.ndarray] , __SCREAMING_SNAKE_CASE : Union[float, torch.FloatTensor] , __SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, np.ndarray] , __SCREAMING_SNAKE_CASE : bool = True , ) -> Union[SchedulerOutput, Tuple]:
__UpperCAmelCase =self.index_for_timestep(__SCREAMING_SNAKE_CASE )
# advance index counter by 1
__UpperCAmelCase =timestep.cpu().item() if torch.is_tensor(__SCREAMING_SNAKE_CASE ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__UpperCAmelCase =self.sigmas[step_index]
__UpperCAmelCase =self.sigmas_interpol[step_index + 1]
__UpperCAmelCase =self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__UpperCAmelCase =self.sigmas[step_index - 1]
__UpperCAmelCase =self.sigmas_interpol[step_index]
__UpperCAmelCase =self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__UpperCAmelCase =0
__UpperCAmelCase =sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__UpperCAmelCase =sigma_hat if self.state_in_first_order else sigma_interpol
__UpperCAmelCase =sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__UpperCAmelCase =sigma_hat if self.state_in_first_order else sigma_interpol
__UpperCAmelCase =model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__UpperCAmelCase =(sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__UpperCAmelCase =sigma_interpol - sigma_hat
# store for 2nd order step
__UpperCAmelCase =sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__UpperCAmelCase =(sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__UpperCAmelCase =sigma_next - sigma_hat
__UpperCAmelCase =self.sample
__UpperCAmelCase =None
__UpperCAmelCase =sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__SCREAMING_SNAKE_CASE )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : torch.FloatTensor , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__UpperCAmelCase =self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__SCREAMING_SNAKE_CASE ):
# mps does not support float64
__UpperCAmelCase =self.timesteps.to(original_samples.device , dtype=torch.floataa )
__UpperCAmelCase =timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__UpperCAmelCase =self.timesteps.to(original_samples.device )
__UpperCAmelCase =timesteps.to(original_samples.device )
__UpperCAmelCase =[self.index_for_timestep(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for t in timesteps]
__UpperCAmelCase =sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__UpperCAmelCase =sigma.unsqueeze(-1 )
__UpperCAmelCase =original_samples + noise * sigma
return noisy_samples
def __len__( self : int ) -> List[str]:
return self.config.num_train_timesteps
| 68 |
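The betas_for_alpha_bar helper at the top of the scheduler discretizes a continuous alpha-bar curve into per-step betas via beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), capped at max_beta. A standalone sketch of that computation for the cosine schedule:
import math
import torch

def cosine_betas(num_steps: int, max_beta: float = 0.999) -> torch.Tensor:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_steps):
        t1, t2 = i / num_steps, (i + 1) / num_steps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)

betas = cosine_betas(1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)  # recovers the discretized alpha-bar curve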
from __future__ import annotations
def lowercase__ ( A_: list[list[int]] ) -> int:
"""simple docstring"""
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(A_ ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(A_ ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 68 | 1 |
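A quick sanity check of the path-cost DP above on the classic 3x3 example. Here min_path_cost is a hypothetical readable name standing in for the obfuscated function, with the same body; note that it mutates the matrix in place:
def min_path_cost(matrix):
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]

print(min_path_cost([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7, via the path 1 -> 3 -> 1 -> 1 -> 1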
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _A ( unittest.TestCase ):
"""simple docstring"""
@property
def _a ( self : List[str] ) -> Dict:
torch.manual_seed(0 )
__UpperCAmelCase =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def _a ( self : int ) -> Union[str, Any]:
__UpperCAmelCase =self.dummy_uncond_unet
__UpperCAmelCase =ScoreSdeVeScheduler()
__UpperCAmelCase =ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
sde_ve.to(__SCREAMING_SNAKE_CASE )
sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =torch.manual_seed(0 )
__UpperCAmelCase =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =torch.manual_seed(0 )
__UpperCAmelCase =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )[
0
]
__UpperCAmelCase =image[0, -3:, -3:, -1]
__UpperCAmelCase =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[int] ) -> int:
__UpperCAmelCase ="""google/ncsnpp-church-256"""
__UpperCAmelCase =UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =ScoreSdeVeScheduler.from_pretrained(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
sde_ve.to(__SCREAMING_SNAKE_CASE )
sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =torch.manual_seed(0 )
__UpperCAmelCase =sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__UpperCAmelCase =np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 68 |
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def lowercase__ ( A_: int , A_: int , A_: int , A_: int , A_: int , A_: int ) -> np.ndarray:
"""simple docstring"""
if (ksize % 2) == 0:
__UpperCAmelCase =ksize + 1
__UpperCAmelCase =np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(A_ ):
for x in range(A_ ):
# distance from center
__UpperCAmelCase =x - ksize // 2
__UpperCAmelCase =y - ksize // 2
# degrees to radians
__UpperCAmelCase =theta / 180 * np.pi
__UpperCAmelCase =np.cos(_theta )
__UpperCAmelCase =np.sin(_theta )
# get kernel x
__UpperCAmelCase =cos_theta * px + sin_theta * py
# get kernel y
__UpperCAmelCase =-sin_theta * px + cos_theta * py
# fill kernel
__UpperCAmelCase =np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__A = imread("../image_data/lena.jpg")
# convert the image to grayscale
__A = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple kernels to detect edges
__A = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
__A = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__A = out / out.max() * 2_55
__A = out.astype(np.uinta)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
| 68 | 1 |
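OpenCV ships its own Gabor kernel builder, which makes a handy sanity check for the hand-rolled version above. Two convention differences to keep in mind: cv2.getGaborKernel takes theta in radians (the snippet takes degrees), and sign/orientation conventions can differ, so compare magnitudes rather than exact values:
import numpy as np
from cv2 import getGaborKernel

ksize, sigma, theta_deg, lambd, gamma, psi = 11, 8, 30, 10, 0.5, 0
reference = getGaborKernel((ksize, ksize), sigma, np.deg2rad(theta_deg), lambd, gamma, psi)
print(reference.shape)  # (11, 11)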
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def lowercase__ ( *A_: Optional[int] ) -> Optional[int]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
__UpperCAmelCase =list(A_ )
for i in range(len(A_ ) ):
__UpperCAmelCase =None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def lowercase__ ( A_: Exception ) -> bool:
"""simple docstring"""
__UpperCAmelCase =[
"""CUDA out of memory.""", # CUDA OOM
"""cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU
"""DefaultCPUAllocator: can't allocate memory""", # CPU OOM
]
if isinstance(A_ , A_ ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def lowercase__ ( A_: callable = None , A_: int = 128 ) -> Any:
"""simple docstring"""
if function is None:
return functools.partial(A_ , starting_batch_size=A_ )
__UpperCAmelCase =starting_batch_size
def decorator(*A_: int , **A_: Any ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
__UpperCAmelCase =list(inspect.signature(A_ ).parameters.keys() )
# Guard against user error
if len(A_ ) < (len(A_ ) + 1):
__UpperCAmelCase =""", """.join([F'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
F'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
F'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError("""No executable batch size found, reached zero.""" )
try:
return function(A_ , *A_ , **A_ )
except Exception as e:
if should_reduce_batch_size(A_ ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
| 68 |
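The decorator above retries the wrapped function, halving the batch size after every OOM-style failure until a size fits. Its public entry point in Accelerate is accelerate.utils.find_executable_batch_size; typical usage looks like the following sketch (the training body is a placeholder):
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # `batch_size` is injected by the decorator; never pass it at the call site
    print(f"trying batch_size={batch_size}")
    # ... build dataloaders and run the loop; let OOM exceptions propagate ...

train()  # retries with 128, 64, 32, ... until the body stops raising OOM errors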
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _A :
"""simple docstring"""
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : List[str]=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=[1, 2, 1] , __SCREAMING_SNAKE_CASE : List[Any]=[2, 2, 4] , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Any=2.0 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : int=0.0 , __SCREAMING_SNAKE_CASE : Dict=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : Tuple=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Any=10 , __SCREAMING_SNAKE_CASE : Dict=8 , ) -> List[Any]:
__UpperCAmelCase =parent
__UpperCAmelCase =batch_size
__UpperCAmelCase =image_size
__UpperCAmelCase =patch_size
__UpperCAmelCase =num_channels
__UpperCAmelCase =embed_dim
__UpperCAmelCase =depths
__UpperCAmelCase =num_heads
__UpperCAmelCase =window_size
__UpperCAmelCase =mlp_ratio
__UpperCAmelCase =qkv_bias
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =drop_path_rate
__UpperCAmelCase =hidden_act
__UpperCAmelCase =use_absolute_embeddings
__UpperCAmelCase =patch_norm
__UpperCAmelCase =layer_norm_eps
__UpperCAmelCase =initializer_range
__UpperCAmelCase =is_training
__UpperCAmelCase =scope
__UpperCAmelCase =use_labels
__UpperCAmelCase =type_sequence_label_size
__UpperCAmelCase =encoder_stride
def _a ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase =None
if self.use_labels:
__UpperCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase =self.get_config()
return config, pixel_values, labels
def _a ( self : List[Any] ) -> Optional[Any]:
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _a ( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
__UpperCAmelCase =SwinvaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__UpperCAmelCase =int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _a ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Tuple:
__UpperCAmelCase =SwinvaForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__UpperCAmelCase =1
__UpperCAmelCase =SwinvaForMaskedImageModeling(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
__UpperCAmelCase =self.type_sequence_label_size
__UpperCAmelCase =SwinvaForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : List[str] ) -> Tuple:
__UpperCAmelCase =self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs
__UpperCAmelCase ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _A ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowerCamelCase : Tuple = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Dict = False
lowerCamelCase : Tuple = False
lowerCamelCase : List[str] = False
lowerCamelCase : Tuple = False
def _a ( self : str ) -> str:
__UpperCAmelCase =SwinvaModelTester(self )
__UpperCAmelCase =ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , embed_dim=37 )
def _a ( self : List[Any] ) -> Optional[int]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : str ) -> str:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def _a ( self : Tuple ) -> Tuple:
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def _a ( self : Optional[Any] ) -> int:
pass
def _a ( self : Tuple ) -> int:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def _a ( self : str ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase =[*signature.parameters.keys()]
__UpperCAmelCase =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =True
for model_class in self.all_model_classes:
__UpperCAmelCase =True
__UpperCAmelCase =False
__UpperCAmelCase =True
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
__UpperCAmelCase =len(self.model_tester.depths )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase =True
__UpperCAmelCase =config.window_size**2
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
__UpperCAmelCase =len(__SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__UpperCAmelCase =True
__UpperCAmelCase =True
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
if hasattr(self.model_tester , """num_hidden_states_types""" ):
__UpperCAmelCase =self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__UpperCAmelCase =2
self.assertEqual(out_len + added_hidden_states , len(__SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> int:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.hidden_states
__UpperCAmelCase =getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# Swinv2 has a different seq_length
__UpperCAmelCase =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__UpperCAmelCase =outputs.reshaped_hidden_states
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =reshaped_hidden_states[0].shape
__UpperCAmelCase =(
reshaped_hidden_states[0].view(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _a ( self : str ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _a ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =3
__UpperCAmelCase =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__UpperCAmelCase =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__UpperCAmelCase =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
def _a ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Dict:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def _a ( self : int ) -> Dict:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase =SwinvaModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def _a ( self : Dict ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =_config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(config=__SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class _A ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Tuple ) -> Dict:
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def _a ( self : int ) -> Optional[int]:
__UpperCAmelCase =SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.default_image_processor
__UpperCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__UpperCAmelCase =image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
__UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE )
# verify the logits
__UpperCAmelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 68 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = 'cvt'
def __init__( self : Any , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : List[Any]=[7, 3, 3] , __SCREAMING_SNAKE_CASE : Any=[4, 2, 2] , __SCREAMING_SNAKE_CASE : List[Any]=[2, 1, 1] , __SCREAMING_SNAKE_CASE : List[str]=[64, 192, 384] , __SCREAMING_SNAKE_CASE : Any=[1, 3, 6] , __SCREAMING_SNAKE_CASE : Optional[int]=[1, 2, 10] , __SCREAMING_SNAKE_CASE : str=[4.0, 4.0, 4.0] , __SCREAMING_SNAKE_CASE : int=[0.0, 0.0, 0.0] , __SCREAMING_SNAKE_CASE : List[str]=[0.0, 0.0, 0.0] , __SCREAMING_SNAKE_CASE : Tuple=[0.0, 0.0, 0.1] , __SCREAMING_SNAKE_CASE : Optional[Any]=[True, True, True] , __SCREAMING_SNAKE_CASE : str=[False, False, True] , __SCREAMING_SNAKE_CASE : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , __SCREAMING_SNAKE_CASE : List[Any]=[3, 3, 3] , __SCREAMING_SNAKE_CASE : List[str]=[1, 1, 1] , __SCREAMING_SNAKE_CASE : Union[str, Any]=[2, 2, 2] , __SCREAMING_SNAKE_CASE : Dict=[1, 1, 1] , __SCREAMING_SNAKE_CASE : Dict=[1, 1, 1] , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : Tuple=1e-12 , **__SCREAMING_SNAKE_CASE : int , ) -> Optional[int]:
super().__init__(**__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =num_channels
__UpperCAmelCase =patch_sizes
__UpperCAmelCase =patch_stride
__UpperCAmelCase =patch_padding
__UpperCAmelCase =embed_dim
__UpperCAmelCase =num_heads
__UpperCAmelCase =depth
__UpperCAmelCase =mlp_ratio
__UpperCAmelCase =attention_drop_rate
__UpperCAmelCase =drop_rate
__UpperCAmelCase =drop_path_rate
__UpperCAmelCase =qkv_bias
__UpperCAmelCase =cls_token
__UpperCAmelCase =qkv_projection_method
__UpperCAmelCase =kernel_qkv
__UpperCAmelCase =padding_kv
__UpperCAmelCase =stride_kv
__UpperCAmelCase =padding_q
__UpperCAmelCase =stride_q
__UpperCAmelCase =initializer_range
__UpperCAmelCase =layer_norm_eps
| 68 |
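Like every PretrainedConfig subclass, the config above supports keyword overrides and JSON round-tripping; a brief sketch:
from transformers import CvtConfig

config = CvtConfig(embed_dim=[64, 192, 384], num_heads=[1, 3, 6])
config.save_pretrained("./cvt-config")  # writes config.json
reloaded = CvtConfig.from_pretrained("./cvt-config")
assert reloaded.embed_dim == [64, 192, 384]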
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A = logging.get_logger(__name__)
__A = {"vocab_file": "spiece.model"}
__A = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
__A = {
"AI-Sweden/gpt-sw3-126m": 20_48,
"AI-Sweden/gpt-sw3-350m": 20_48,
"AI-Sweden/gpt-sw3-1.6b": 20_48,
"AI-Sweden/gpt-sw3-6.7b": 20_48,
"AI-Sweden/gpt-sw3-20b": 20_48,
}
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : int = VOCAB_FILES_NAMES
lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = ['input_ids', 'attention_mask']
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None:
__UpperCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
__UpperCAmelCase =kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" you are testing the model, this can safely be ignored""" )
__UpperCAmelCase ="""None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__UpperCAmelCase ="""<|endoftext|>""" if eos_token is None else eos_token
__UpperCAmelCase ="""<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__UpperCAmelCase =unk_token if pad_token is None else pad_token
__UpperCAmelCase =eos_token if bos_token is None else bos_token
else:
__UpperCAmelCase ="""<pad>""" if pad_token is None else pad_token
__UpperCAmelCase ="""<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
__UpperCAmelCase =do_lower_case
__UpperCAmelCase =remove_space
__UpperCAmelCase =keep_accents
__UpperCAmelCase =vocab_file
__UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
# Used for whitespace normalization in input texts
# fmt: off
__UpperCAmelCase ={""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
__UpperCAmelCase =re.compile(
f'''[{"".join(map(__SCREAMING_SNAKE_CASE , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]''' )
def __getstate__( self : Any ) -> str:
__UpperCAmelCase =self.__dict__.copy()
__UpperCAmelCase =None
return state
def __setstate__( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__UpperCAmelCase ={}
__UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def _a ( self : Union[str, Any] ) -> int:
return len(self.sp_model )
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : str ) -> str:
__UpperCAmelCase =self.non_printing_characters_re.sub("""""" , __SCREAMING_SNAKE_CASE )
# Normalize whitespaces
__UpperCAmelCase ="""""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
__UpperCAmelCase =unicodedata.normalize("""NFC""" , __SCREAMING_SNAKE_CASE )
return text
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]:
__UpperCAmelCase =self.preprocess_text(__SCREAMING_SNAKE_CASE )
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str ) -> int:
return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> str:
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
@staticmethod
def _a ( __SCREAMING_SNAKE_CASE : str ) -> str:
return out_string
def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> str:
__UpperCAmelCase =[]
__UpperCAmelCase =""""""
__UpperCAmelCase =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
__UpperCAmelCase =True
__UpperCAmelCase =[]
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string
def _a ( self : Any ) -> Dict[str, int]:
__UpperCAmelCase ={self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase =os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , """wb""" ) as fi:
__UpperCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__UpperCAmelCase =self.preprocess_text(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.sp_model.encode(__SCREAMING_SNAKE_CASE )
else:
__UpperCAmelCase =[self.preprocess_text(__SCREAMING_SNAKE_CASE ) for t in text]
__UpperCAmelCase =self.sp_model.encode(__SCREAMING_SNAKE_CASE )
if return_tensors is True or return_tensors == "pt":
__UpperCAmelCase =torch.tensor(__SCREAMING_SNAKE_CASE )
return token_ids
def _a ( self : str , __SCREAMING_SNAKE_CASE : Union[int, List[int]] ) -> str:
return self.sp_model.decode(__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : "Conversation" ) -> List[int]:
__UpperCAmelCase =[f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
__UpperCAmelCase =(
f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(__SCREAMING_SNAKE_CASE ) + f'''{self.bos_token}Bot:'''
)
return self.encode(text=__SCREAMING_SNAKE_CASE )
| 68 | 1 |
from PIL import Image
def lowercase__ ( A_: Image , A_: float ) -> Image:
"""simple docstring"""
def brightness(A_: int ) -> float:
return 128 + level + (c - 128)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
return img.point(A_ )
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
__A = change_brightness(img, 1_00)
brigt_img.save("image_data/lena_brightness.png", format="png")
| 68 |
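Image.point applies the mapping per channel and clips the result to the 8-bit range, so the additive shift above saturates at 0 and 255. For multiplicative brightness, Pillow's ImageEnhance module is the usual alternative; a short sketch:
from PIL import Image, ImageEnhance

with Image.open("image_data/lena.jpg") as img:
    brighter = ImageEnhance.Brightness(img).enhance(1.4)  # 1.0 = original, >1.0 = brighter
    brighter.save("image_data/lena_brightness_enhance.png", format="png")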
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__A = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Path , __SCREAMING_SNAKE_CASE : Union[str, None] = None , __SCREAMING_SNAKE_CASE : Union[List[str], None] = None , __SCREAMING_SNAKE_CASE : Union[str, List[str], None] = None , __SCREAMING_SNAKE_CASE : bool = True , ) -> List[str]:
__UpperCAmelCase =[file for file in os.listdir(__SCREAMING_SNAKE_CASE ) if os.path.isfile(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )]
if identifier is not None:
__UpperCAmelCase =[file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for n_ in n_identifier:
__UpperCAmelCase =[file for file in files if n_ not in file]
else:
__UpperCAmelCase =[file for file in files if n_identifier not in file]
__UpperCAmelCase =ignore_files or []
ignore_files.append("""__init__.py""" )
__UpperCAmelCase =[file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , __SCREAMING_SNAKE_CASE )
if only_modules:
__UpperCAmelCase =file.split(""".""" )[0]
try:
__UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =doctest.DocTestSuite(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =unittest.TextTestRunner().run(__SCREAMING_SNAKE_CASE )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f'''{module_identifier} is not a module.''' )
else:
__UpperCAmelCase =doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def _a ( self : Optional[Any] ) -> List[str]:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase ="""modeling"""
__UpperCAmelCase =[
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE , ignore_files=__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase ="""tokenization"""
self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase ="""configuration"""
self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE )
def _a ( self : List[Any] ) -> Tuple:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase =["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(__SCREAMING_SNAKE_CASE , n_identifier=__SCREAMING_SNAKE_CASE )
def _a ( self : Any ) -> Tuple:
__UpperCAmelCase =Path("""docs/source""" )
__UpperCAmelCase =["""favicon.ico"""]
self.analyze_directory(__SCREAMING_SNAKE_CASE , ignore_files=__SCREAMING_SNAKE_CASE , only_modules=__SCREAMING_SNAKE_CASE )
| 68 | 1 |
def lowercase__ ( A_: Optional[int] , A_: Optional[Any] , A_: Optional[int] ) -> Any:
"""simple docstring"""
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(A_ , n - 1 , A_ ) * a) % mod
else:
__UpperCAmelCase =binary_exponentiation(A_ , n // 2 , A_ )  # floor division keeps the exponent an int
return (b * b) % mod
# a prime number
__A = 7_01
__A = 10_00_00_00_00
__A = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
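# using Python operators: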
print((a / b) % p == (a * b ** (p - 2)) % p)
| 68 |
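The two prints verify Fermat's little theorem: for prime p and b not divisible by p, b^(p-2) is the modular inverse of b. Python's built-in pow performs the same modular exponentiation without recursion, and since Python 3.8 it also accepts a negative exponent for modular inverses directly:
p, b = 701, 10
inv_fermat = pow(b, p - 2, p)  # Fermat: b^(p-2) mod p
inv_builtin = pow(b, -1, p)    # extended Euclid under the hood (Python >= 3.8)
assert inv_fermat == inv_builtin
assert (b * inv_fermat) % p == 1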
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__A = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def lowercase__ ( A_: int , A_: Optional[Any] , A_: List[str]=None ) -> List[str]:
"""simple docstring"""
if rng is None:
__UpperCAmelCase =random.Random()
__UpperCAmelCase =1
for dim in shape:
total_dims *= dim
__UpperCAmelCase =[]
for _ in range(A_ ):
values.append(rng.randint(0 , vocab_size - 1 ) )
__UpperCAmelCase =np.array(A_ , dtype=jnp.intaa ).reshape(A_ )
return output
def lowercase__ ( A_: List[str] , A_: List[str]=None ) -> Any:
"""simple docstring"""
__UpperCAmelCase =ids_tensor(A_ , vocab_size=2 , rng=A_ )
# make sure that at least one token is attended to for each batch
__UpperCAmelCase =1
return attn_mask
@require_flax
class _A :
"""simple docstring"""
lowerCamelCase : Optional[Any] = None
lowerCamelCase : int = ()
def _a ( self : str ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
__UpperCAmelCase =2
__UpperCAmelCase =inputs["""input_ids"""].shape[-1] // 2
__UpperCAmelCase =inputs["""input_ids"""][:max_batch_size, :sequence_length]
__UpperCAmelCase =jnp.ones_like(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
__UpperCAmelCase =input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
__UpperCAmelCase =config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _a ( self : Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
__UpperCAmelCase =0
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =pt_model_class(__SCREAMING_SNAKE_CASE ).eval()
__UpperCAmelCase =load_flax_weights_in_pytorch_model(__SCREAMING_SNAKE_CASE , flax_model.params )
__UpperCAmelCase =flax_model.generate(__SCREAMING_SNAKE_CASE ).sequences
__UpperCAmelCase =pt_model.generate(torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
__UpperCAmelCase =flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def _a ( self : Optional[int] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Union[str, Any] ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =True
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : List[Any] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
__UpperCAmelCase =2
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Any ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
__UpperCAmelCase =2
__UpperCAmelCase =2
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def _a ( self : Union[str, Any] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =True
__UpperCAmelCase =max_length
__UpperCAmelCase =0.8
__UpperCAmelCase =10
__UpperCAmelCase =0.3
__UpperCAmelCase =1
__UpperCAmelCase =8
__UpperCAmelCase =9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =max_length
__UpperCAmelCase =1
__UpperCAmelCase =8
__UpperCAmelCase =9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Optional[int] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =max_length
__UpperCAmelCase =2
__UpperCAmelCase =1
__UpperCAmelCase =8
__UpperCAmelCase =9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : List[str] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase =attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase =False
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Dict ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase =attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase =True
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Dict ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase =attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase =2
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : int ) -> Any:
__UpperCAmelCase =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
__UpperCAmelCase =FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
__UpperCAmelCase ="""Hello world"""
__UpperCAmelCase =tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , """do_samples""" ):
model.generate(__SCREAMING_SNAKE_CASE , do_samples=__SCREAMING_SNAKE_CASE )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , """foo""" ):
__UpperCAmelCase ={"""foo""": """bar"""}
model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
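# Hedged sketch of the pattern those tests exercise: run Flax generation eagerly
# and under jax.jit, then compare token sequences. The checkpoint name is an
# illustrative assumption.
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM

def check_jit_generate(name: str = "hf-internal-testing/tiny-random-gpt2") -> None:
    tokenizer = AutoTokenizer.from_pretrained(name)
    model = FlaxAutoModelForCausalLM.from_pretrained(name)
    input_ids = tokenizer("Hello world", return_tensors="np").input_ids
    eager = model.generate(input_ids).sequences
    jitted = jit(model.generate)(input_ids).sequences  # compiled path
    assert eager.tolist() == jitted.tolist()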
| 68 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def lowercase__ ( A_: str ) -> YolosConfig:
"""simple docstring"""
__UpperCAmelCase =YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
__UpperCAmelCase =192
__UpperCAmelCase =768
__UpperCAmelCase =12
__UpperCAmelCase =3
__UpperCAmelCase =[800, 1333]
__UpperCAmelCase =False
elif yolos_name == "yolos_s_dWr":
__UpperCAmelCase =330
__UpperCAmelCase =14
__UpperCAmelCase =6
__UpperCAmelCase =1320
elif "yolos_s" in yolos_name:
__UpperCAmelCase =384
__UpperCAmelCase =1536
__UpperCAmelCase =12
__UpperCAmelCase =6
elif "yolos_b" in yolos_name:
__UpperCAmelCase =[800, 1344]
__UpperCAmelCase =91
__UpperCAmelCase ="""huggingface/label-files"""
__UpperCAmelCase ="""coco-detection-id2label.json"""
__UpperCAmelCase =json.load(open(hf_hub_download(A_ , A_ , repo_type="""dataset""" ) , """r""" ) )
__UpperCAmelCase ={int(A_ ): v for k, v in idalabel.items()}
__UpperCAmelCase =idalabel
__UpperCAmelCase ={v: k for k, v in idalabel.items()}
return config
def lowercase__ ( A_: dict , A_: YolosConfig , A_: bool = False ) -> Tuple:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__UpperCAmelCase =state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
__UpperCAmelCase =state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__UpperCAmelCase =in_proj_weight[: config.hidden_size, :]
__UpperCAmelCase =in_proj_bias[: config.hidden_size]
__UpperCAmelCase =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__UpperCAmelCase =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__UpperCAmelCase =in_proj_weight[-config.hidden_size :, :]
__UpperCAmelCase =in_proj_bias[-config.hidden_size :]
def lowercase__ ( A_: str ) -> str:
"""simple docstring"""
if "backbone" in name:
__UpperCAmelCase =name.replace("""backbone""" , """vit""" )
if "cls_token" in name:
__UpperCAmelCase =name.replace("""cls_token""" , """embeddings.cls_token""" )
if "det_token" in name:
__UpperCAmelCase =name.replace("""det_token""" , """embeddings.detection_tokens""" )
if "mid_pos_embed" in name:
__UpperCAmelCase =name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" )
if "pos_embed" in name:
__UpperCAmelCase =name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__UpperCAmelCase =name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "blocks" in name:
__UpperCAmelCase =name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
__UpperCAmelCase =name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
__UpperCAmelCase =name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__UpperCAmelCase =name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__UpperCAmelCase =name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__UpperCAmelCase =name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__UpperCAmelCase =name.replace("""mlp.fc2""" , """output.dense""" )
if "class_embed" in name:
__UpperCAmelCase =name.replace("""class_embed""" , """class_labels_classifier""" )
if "bbox_embed" in name:
__UpperCAmelCase =name.replace("""bbox_embed""" , """bbox_predictor""" )
if "vit.norm" in name:
__UpperCAmelCase =name.replace("""vit.norm""" , """vit.layernorm""" )
return name
def lowercase__ ( A_: dict , A_: YolosForObjectDetection ) -> dict:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__UpperCAmelCase =orig_state_dict.pop(A_ )
if "qkv" in key:
__UpperCAmelCase =key.split(""".""" )
__UpperCAmelCase =int(key_split[2] )
__UpperCAmelCase =model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
__UpperCAmelCase =val[:dim, :]
__UpperCAmelCase =val[
dim : dim * 2, :
]
__UpperCAmelCase =val[-dim:, :]
else:
__UpperCAmelCase =val[:dim]
__UpperCAmelCase =val[dim : dim * 2]
__UpperCAmelCase =val[-dim:]
else:
__UpperCAmelCase =val
return orig_state_dict
def lowercase__ ( ) -> torch.Tensor:
"""simple docstring"""
__UpperCAmelCase ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
__UpperCAmelCase =Image.open(requests.get(A_ , stream=A_ ).raw )
return im
@torch.no_grad()
def lowercase__ ( A_: str , A_: str , A_: str , A_: bool = False ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase =get_yolos_config(A_ )
# load original state_dict
__UpperCAmelCase =torch.load(A_ , map_location="""cpu""" )["""model"""]
# load 🤗 model
__UpperCAmelCase =YolosForObjectDetection(A_ )
model.eval()
__UpperCAmelCase =convert_state_dict(A_ , A_ )
model.load_state_dict(A_ )
# Check outputs on an image, prepared by YolosImageProcessor
__UpperCAmelCase =800 if yolos_name != """yolos_ti""" else 512
__UpperCAmelCase =YolosImageProcessor(format="""coco_detection""" , size=A_ )
__UpperCAmelCase =image_processor(images=prepare_img() , return_tensors="""pt""" )
__UpperCAmelCase =model(**A_ )
__UpperCAmelCase , __UpperCAmelCase =outputs.logits, outputs.pred_boxes
__UpperCAmelCase , __UpperCAmelCase =None, None
if yolos_name == "yolos_ti":
__UpperCAmelCase =torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
__UpperCAmelCase =torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
__UpperCAmelCase =torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
__UpperCAmelCase =torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
__UpperCAmelCase =torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
__UpperCAmelCase =torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
__UpperCAmelCase =torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
__UpperCAmelCase =torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
__UpperCAmelCase =torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
__UpperCAmelCase =torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3] , A_ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , A_ , atol=1e-4 )
Path(A_ ).mkdir(exist_ok=A_ )
print(F'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(A_ )
if push_to_hub:
__UpperCAmelCase ={
"""yolos_ti""": """yolos-tiny""",
"""yolos_s_200_pre""": """yolos-small""",
"""yolos_s_300_pre""": """yolos-small-300""",
"""yolos_s_dWr""": """yolos-small-dwr""",
"""yolos_base""": """yolos-base""",
}
print("""Pushing to the hub...""" )
__UpperCAmelCase =model_mapping[yolos_name]
image_processor.push_to_hub(A_ , organization="""hustvl""" )
model.push_to_hub(A_ , organization="""hustvl""" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__A = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
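# The core move in convert_state_dict above: timm stores Q, K and V as one fused
# (3*hidden, hidden) matrix that is sliced into three (hidden, hidden) blocks,
# in Q/K/V order. Minimal sketch with an illustrative size:
import torch

hidden = 4
qkv = torch.randn(3 * hidden, hidden)
q = qkv[:hidden, :]
k = qkv[hidden : hidden * 2, :]
v = qkv[-hidden:, :]
assert torch.equal(torch.cat([q, k, v], dim=0), qkv)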
| 68 |
from __future__ import annotations
from collections.abc import Iterator
class _A :
"""simple docstring"""
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> None:
__UpperCAmelCase =value
__UpperCAmelCase =None
__UpperCAmelCase =None
class _A :
"""simple docstring"""
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Node ) -> None:
__UpperCAmelCase =tree
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Node | None ) -> int:
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : int ) -> Iterator[int]:
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
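# Self-contained sketch of the same idea with explicit names (all of them
# illustrative): summing a binary tree by recursive depth-first search.
from dataclasses import dataclass
from typing import Optional

@dataclass
class TreeNode:
    value: int
    left: "Optional[TreeNode]" = None
    right: "Optional[TreeNode]" = None

def tree_sum(node: "Optional[TreeNode]") -> int:
    if node is None:
        return 0
    return node.value + tree_sum(node.left) + tree_sum(node.right)

assert tree_sum(TreeNode(10, TreeNode(5), TreeNode(-3))) == 12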
| 68 | 1 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
| 68 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def lowercase__ ( A_: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase =botoa.client("""iam""" )
__UpperCAmelCase ={
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=A_ , AssumeRolePolicyDocument=json.dumps(A_ , indent=2 ) )
__UpperCAmelCase ={
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=A_ , PolicyName=F'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(A_ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def lowercase__ ( A_: Dict ) -> Any:
"""simple docstring"""
__UpperCAmelCase =botoa.client("""iam""" )
return iam_client.get_role(RoleName=A_ )["Role"]["Arn"]
def lowercase__ ( ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase =_ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , A_ , )
__UpperCAmelCase =None
if credentials_configuration == 0:
__UpperCAmelCase =_ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
__UpperCAmelCase =aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
__UpperCAmelCase =_ask_field("""AWS Access Key ID: """ )
__UpperCAmelCase =aws_access_key_id
__UpperCAmelCase =_ask_field("""AWS Secret Access Key: """ )
__UpperCAmelCase =aws_secret_access_key
__UpperCAmelCase =_ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
__UpperCAmelCase =aws_region
__UpperCAmelCase =_ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , A_ , )
if role_management == 0:
__UpperCAmelCase =_ask_field("""Enter your IAM role name: """ )
else:
__UpperCAmelCase ="""accelerate_sagemaker_execution_role"""
print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(A_ )
__UpperCAmelCase =_ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =None
if is_custom_docker_image:
__UpperCAmelCase =_ask_field("""Enter your Docker image: """ , lambda A_ : str(A_ ).lower() )
__UpperCAmelCase =_ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =None
if is_sagemaker_inputs_enabled:
__UpperCAmelCase =_ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda A_ : str(A_ ).lower() , )
__UpperCAmelCase =_ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =None
if is_sagemaker_metrics_enabled:
__UpperCAmelCase =_ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda A_ : str(A_ ).lower() , )
__UpperCAmelCase =_ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
__UpperCAmelCase ={}
__UpperCAmelCase =_ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
if use_dynamo:
__UpperCAmelCase ="""dynamo_"""
__UpperCAmelCase =_ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
__UpperCAmelCase =_ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
if use_custom_options:
__UpperCAmelCase =_ask_options(
"""Which mode do you want to use?""" , A_ , lambda A_ : TORCH_DYNAMO_MODES[int(A_ )] , default="""default""" , )
__UpperCAmelCase =_ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =_ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase ="""Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
__UpperCAmelCase =_ask_options(
A_ , A_ , lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__UpperCAmelCase =_ask_field(A_ , lambda A_ : str(A_ ).lower() , default="""ml.p3.2xlarge""" )
__UpperCAmelCase =1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__UpperCAmelCase =_ask_field(
"""How many machines do you want use? [1]: """ , A_ , default=1 , )
__UpperCAmelCase =_ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=A_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=A_ , use_cpu=A_ , dynamo_config=A_ , eca_instance_type=A_ , profile=A_ , region=A_ , iam_role_name=A_ , mixed_precision=A_ , num_machines=A_ , sagemaker_inputs_file=A_ , sagemaker_metrics_file=A_ , )
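# Minimal sketch of the yes/no conversion pattern the prompts above lean on
# (an illustrative stand-in, not accelerate's actual helper):
def convert_yes_no_to_bool_sketch(value: str) -> bool:
    value = value.strip().lower()
    if value not in ("yes", "no"):
        raise ValueError("Please enter yes or no.")
    return value == "yes"

assert convert_yes_no_to_bool_sketch(" YES ") is True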
| 68 | 1 |
__A = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 68 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Tuple = 'ctrl'
lowerCamelCase : Any = ['past_key_values']
lowerCamelCase : Optional[int] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=246534 , __SCREAMING_SNAKE_CASE : int=256 , __SCREAMING_SNAKE_CASE : Optional[Any]=1280 , __SCREAMING_SNAKE_CASE : Optional[Any]=8192 , __SCREAMING_SNAKE_CASE : int=48 , __SCREAMING_SNAKE_CASE : Union[str, Any]=16 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=1e-6 , __SCREAMING_SNAKE_CASE : List[str]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , **__SCREAMING_SNAKE_CASE : int , ) -> Any:
__UpperCAmelCase =vocab_size
__UpperCAmelCase =n_positions
__UpperCAmelCase =n_embd
__UpperCAmelCase =n_layer
__UpperCAmelCase =n_head
__UpperCAmelCase =dff
__UpperCAmelCase =resid_pdrop
__UpperCAmelCase =embd_pdrop
__UpperCAmelCase =layer_norm_epsilon
__UpperCAmelCase =initializer_range
__UpperCAmelCase =use_cache
super().__init__(**__SCREAMING_SNAKE_CASE )
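# Hedged usage sketch: the attribute_map above lets generic config names resolve
# to the CTRL-specific ones, e.g. hidden_size -> n_embd (standard
# PretrainedConfig behavior; the import assumes a recent transformers release):
from transformers import CTRLConfig

cfg = CTRLConfig(n_embd=1280)
assert cfg.hidden_size == 1280  # routed through attribute_map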
| 68 | 1 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _A ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : List[str] = TextToVideoSDPipeline
lowerCamelCase : str = TEXT_TO_IMAGE_PARAMS
lowerCamelCase : int = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
lowerCamelCase : Tuple = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def _a ( self : Optional[int] ) -> List[str]:
torch.manual_seed(0 )
__UpperCAmelCase =UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
__UpperCAmelCase =DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
__UpperCAmelCase =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__UpperCAmelCase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
__UpperCAmelCase =CLIPTextModel(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__UpperCAmelCase ={
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def _a ( self : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any=0 ) -> Optional[Any]:
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__UpperCAmelCase =torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__UpperCAmelCase =torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ={
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def _a ( self : Tuple ) -> int:
__UpperCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase =self.get_dummy_components()
__UpperCAmelCase =TextToVideoSDPipeline(**__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ="""np"""
__UpperCAmelCase =sd_pipe(**__SCREAMING_SNAKE_CASE ).frames
__UpperCAmelCase =frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
__UpperCAmelCase =np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : List[Any] ) -> Optional[int]:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _a ( self : Optional[Any] ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE , expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def _a ( self : Tuple ) -> str:
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def _a ( self : List[str] ) -> int:
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def _a ( self : Any ) -> List[Any]:
pass
def _a ( self : Tuple ) -> Optional[int]:
return super().test_progress_bar()
@slow
@skip_mps
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : int ) -> Any:
__UpperCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
__UpperCAmelCase =TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
__UpperCAmelCase =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__UpperCAmelCase =pipe.to("""cuda""" )
__UpperCAmelCase ="""Spiderman is surfing"""
__UpperCAmelCase =torch.Generator(device="""cpu""" ).manual_seed(0 )
__UpperCAmelCase =pipe(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="""pt""" ).frames
__UpperCAmelCase =video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def _a ( self : Union[str, Any] ) -> int:
__UpperCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
__UpperCAmelCase =TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
__UpperCAmelCase =pipe.to("""cuda""" )
__UpperCAmelCase ="""Spiderman is surfing"""
__UpperCAmelCase =torch.Generator(device="""cpu""" ).manual_seed(0 )
__UpperCAmelCase =pipe(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="""pt""" ).frames
__UpperCAmelCase =video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
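# The assertions above follow one recurring recipe: compare a tiny corner slice
# of the output against stored reference values with an absolute tolerance.
# Minimal sketch with illustrative numbers:
import numpy as np

frame = np.full((64, 64, 3), 0.5)
expected_slice = np.full(9, 0.5)
assert np.abs(frame[-3:, -3:, -1].flatten() - expected_slice).max() < 1e-2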
| 68 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def lowercase__ ( A_: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__UpperCAmelCase =k.replace(A_ , A_ )
if k.startswith("""encoder""" ):
__UpperCAmelCase =k.replace(""".attn""" , """.self_attn""" )
__UpperCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" )
__UpperCAmelCase =k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
__UpperCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" )
__UpperCAmelCase =k.replace("""norm2""" , """encoder_attn_layer_norm""" )
__UpperCAmelCase =k.replace("""norm3""" , """final_layer_norm""" )
return k
def lowercase__ ( A_: Tuple ) -> str:
"""simple docstring"""
__UpperCAmelCase =[
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
__UpperCAmelCase =sd.pop(A_ )
__UpperCAmelCase =k.replace("""layernorm_embedding""" , """layer_norm""" )
assert new_k not in sd
__UpperCAmelCase =v
__A = ["START"]
@torch.no_grad()
def lowercase__ ( A_: List[Any] , A_: str , A_: int ) -> Optional[int]:
"""simple docstring"""
__UpperCAmelCase =torch.load(A_ , map_location="""cpu""" )
__UpperCAmelCase =model["""model"""]
__UpperCAmelCase =BlenderbotConfig.from_json_file(A_ )
__UpperCAmelCase =BlenderbotForConditionalGeneration(A_ )
__UpperCAmelCase =m.model.state_dict().keys()
__UpperCAmelCase =[]
__UpperCAmelCase ={}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__UpperCAmelCase =rename_state_dict_key(A_ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__UpperCAmelCase =v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(A_ )
m.model.load_state_dict(A_ , strict=A_ )
m.half()
m.save_pretrained(A_ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
__A = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
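# Minimal sketch of the key-renaming pass above: substring pattern pairs are
# applied in order to each parameter name (the pairs here are illustrative):
PATTERN_PAIRS = [("attention", "attn"), ("q_lin", "q_proj")]

def rename_key(k: str) -> str:
    for parlai_name, hf_name in PATTERN_PAIRS:
        k = k.replace(parlai_name, hf_name)
    return k

assert rename_key("encoder.attention.q_lin.weight") == "encoder.attn.q_proj.weight"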
| 68 | 1 |
import math
import sys
def lowercase__ ( A_: str ) -> str:
"""simple docstring"""
__UpperCAmelCase =""""""
try:
with open(A_ , """rb""" ) as binary_file:
__UpperCAmelCase =binary_file.read()
for dat in data:
__UpperCAmelCase =F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def lowercase__ ( A_: str ) -> str:
"""simple docstring"""
__UpperCAmelCase ={"""0""": """0""", """1""": """1"""}
__UpperCAmelCase , __UpperCAmelCase ="""""", """"""
__UpperCAmelCase =len(A_ )
for i in range(len(A_ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__UpperCAmelCase =lexicon[curr_string]
result += last_match_id
__UpperCAmelCase =last_match_id + """0"""
if math.loga(A_ ).is_integer():
__UpperCAmelCase ={}
for curr_key in list(A_ ):
__UpperCAmelCase =lexicon.pop(A_ )
__UpperCAmelCase =new_lex
__UpperCAmelCase =last_match_id + """1"""
index += 1
__UpperCAmelCase =""""""
return result
def lowercase__ ( A_: str , A_: str ) -> None:
"""simple docstring"""
__UpperCAmelCase =8
try:
with open(A_ , """wb""" ) as opened_file:
__UpperCAmelCase =[
to_write[i : i + byte_length]
for i in range(0 , len(A_ ) , A_ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(A_ , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def lowercase__ ( A_: str ) -> str:
"""simple docstring"""
__UpperCAmelCase =0
for letter in data_bits:
if letter == "1":
break
counter += 1
__UpperCAmelCase =data_bits[counter:]
__UpperCAmelCase =data_bits[counter + 1 :]
return data_bits
def lowercase__ ( A_: str , A_: str ) -> None:
"""simple docstring"""
__UpperCAmelCase =read_file_binary(A_ )
__UpperCAmelCase =remove_prefix(A_ )
__UpperCAmelCase =decompress_data(A_ )
write_file_binary(A_ , A_ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
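# Sketch of the byte packing used by write_file_binary above: split the bit
# string into 8-bit groups and write each group as a single byte.
bits = "0100000101000010"  # the bits of b"AB"
chunks = [bits[i : i + 8] for i in range(0, len(bits), 8)]
assert bytes(int(chunk, 2) for chunk in chunks) == b"AB"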
| 68 |
from itertools import permutations
def lowercase__ ( A_: tuple ) -> bool:
"""simple docstring"""
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__UpperCAmelCase =[7, 11, 13, 17]
for i, test in enumerate(A_ ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowercase__ ( A_: int = 10 ) -> int:
"""simple docstring"""
return sum(
int("""""".join(map(A_ , A_ ) ) )
for num in permutations(range(A_ ) )
if is_substring_divisible(A_ ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 68 | 1 |
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
__A = argparse.ArgumentParser(
description=(
"Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
parser.add_argument("--model_name", default="roberta-large", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
__A = parser.parse_args()
if args.model_type == "roberta":
__A = RobertaForMaskedLM.from_pretrained(args.model_name)
__A = "roberta"
elif args.model_type == "gpt2":
__A = GPTaLMHeadModel.from_pretrained(args.model_name)
__A = "transformer"
__A = model.state_dict()
__A = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
__A = state_dict[F"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
__A = F"""{prefix}.embeddings.{w}.weight"""
__A = state_dict[param_name]
for w in ["weight", "bias"]:
__A = F"""{prefix}.embeddings.LayerNorm.{w}"""
__A = state_dict[param_name]
# Transformer Blocks #
__A = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
__A = state_dict[
F"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
__A = state_dict[F"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
__A = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
__A = state_dict[F"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
__A = state_dict[F"""lm_head.dense.{w}"""]
__A = state_dict[F"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
__A = state_dict[F"""{prefix}.ln_f.{w}"""]
__A = state_dict["lm_head.weight"]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
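# Sketch of the layer-selection idea above: the student copies a fixed subset of
# teacher blocks, and each selected teacher index maps to the next consecutive
# student index:
teacher_layers = [0, 2, 4, 7, 9, 11]
teacher_to_student = {t: s for s, t in enumerate(teacher_layers)}
assert teacher_to_student[7] == 3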
| 68 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
__A = TypeVar("T")
def lowercase__ ( A_: int ) -> int:
"""simple docstring"""
return (position - 1) // 2
def lowercase__ ( A_: int ) -> int:
"""simple docstring"""
return (2 * position) + 1
def lowercase__ ( A_: int ) -> int:
"""simple docstring"""
return (2 * position) + 2
class _A ( Generic[T] ):
"""simple docstring"""
def __init__( self : List[str] ) -> None:
__UpperCAmelCase =[]
__UpperCAmelCase ={}
__UpperCAmelCase =0
def __len__( self : str ) -> int:
return self.elements
def __repr__( self : Dict ) -> str:
return str(self.heap )
def _a ( self : Optional[int] ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
__UpperCAmelCase =self.elements
self.elements += 1
self._bubble_up(__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[int] ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
__UpperCAmelCase , __UpperCAmelCase =self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__UpperCAmelCase , __UpperCAmelCase =self.heap[0]
self._bubble_down(__SCREAMING_SNAKE_CASE )
return elem
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
# Update the weight of the given key
__UpperCAmelCase =self.position_map[elem]
__UpperCAmelCase =(elem, weight)
if position > 0:
__UpperCAmelCase =get_parent_position(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase =self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(__SCREAMING_SNAKE_CASE )
else:
self._bubble_down(__SCREAMING_SNAKE_CASE )
else:
self._bubble_down(__SCREAMING_SNAKE_CASE )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : T ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
__UpperCAmelCase =self.position_map[elem]
if curr_pos == 0:
return None
__UpperCAmelCase =get_parent_position(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase =self.heap[curr_pos]
__UpperCAmelCase , __UpperCAmelCase =self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_up(__SCREAMING_SNAKE_CASE )
return None
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
__UpperCAmelCase =self.position_map[elem]
__UpperCAmelCase , __UpperCAmelCase =self.heap[curr_pos]
__UpperCAmelCase =get_child_left_position(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =get_child_right_position(__SCREAMING_SNAKE_CASE )
if child_left_position < self.elements and child_right_position < self.elements:
__UpperCAmelCase , __UpperCAmelCase =self.heap[child_left_position]
__UpperCAmelCase , __UpperCAmelCase =self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_down(__SCREAMING_SNAKE_CASE )
if child_left_position < self.elements:
__UpperCAmelCase , __UpperCAmelCase =self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_down(__SCREAMING_SNAKE_CASE )
else:
return None
if child_right_position < self.elements:
__UpperCAmelCase , __UpperCAmelCase =self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_down(__SCREAMING_SNAKE_CASE )
return None
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> None:
# Swap the nodes at the given positions
__UpperCAmelCase =self.heap[nodea_pos][0]
__UpperCAmelCase =self.heap[nodea_pos][0]
__UpperCAmelCase , __UpperCAmelCase =(
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__UpperCAmelCase =nodea_pos
__UpperCAmelCase =nodea_pos
class _A ( Generic[T] ):
"""simple docstring"""
def __init__( self : List[Any] ) -> None:
__UpperCAmelCase ={}
__UpperCAmelCase =0
def __repr__( self : Tuple ) -> str:
return str(self.connections )
def __len__( self : str ) -> int:
return self.nodes
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : T ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
__UpperCAmelCase ={}
self.nodes += 1
def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(__SCREAMING_SNAKE_CASE )
self.add_node(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =weight
__UpperCAmelCase =weight
def lowercase__ ( A_: GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
"""simple docstring"""
__UpperCAmelCase ={node: maxsize for node in graph.connections}
__UpperCAmelCase ={node: None for node in graph.connections}
__UpperCAmelCase =MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(A_ , A_ )
if priority_queue.is_empty():
return dist, parent
# initialization
__UpperCAmelCase =priority_queue.extract_min()
__UpperCAmelCase =0
for neighbour in graph.connections[node]:
if dist[neighbour] > graph.connections[node][neighbour]: # for an MST, relax on the edge weight alone, not the path length
__UpperCAmelCase =graph.connections[node][neighbour]
priority_queue.update_key(A_ , dist[neighbour] )
__UpperCAmelCase =node
# running prim's algorithm
while not priority_queue.is_empty():
__UpperCAmelCase =priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > graph.connections[node][neighbour]: # edge weight, not path length
__UpperCAmelCase =graph.connections[node][neighbour]
priority_queue.update_key(A_ , dist[neighbour] )
__UpperCAmelCase =node
return dist, parent
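# Index arithmetic behind the array-backed binary heap above: the children of
# node i sit at 2i+1 and 2i+2, and both point back to i as their parent.
def parent(i: int) -> int:
    return (i - 1) // 2

def left(i: int) -> int:
    return 2 * i + 1

def right(i: int) -> int:
    return 2 * i + 2

assert parent(left(5)) == 5 and parent(right(5)) == 5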
| 68 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__A = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["CLIPFeatureExtractor"]
__A = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
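# The TYPE_CHECKING/_LazyModule split above defers heavy imports until an
# attribute is first touched. Minimal sketch of the same idea (the class name
# is illustrative):
import importlib

class LazyModuleSketch:
    def __init__(self, name: str) -> None:
        self._name = name
        self._module = None

    def __getattr__(self, attr: str):
        if self._module is None:
            self._module = importlib.import_module(self._name)  # import on demand
        return getattr(self._module, attr)

lazy_math = LazyModuleSketch("math")
assert lazy_math.sqrt(9) == 3.0  # the real import happens here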
| 68 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
__A = logging.get_logger(__name__)
@dataclass
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self : Any , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
__UpperCAmelCase =deprecated_arg[3:]
__UpperCAmelCase =not kwargs.pop(__SCREAMING_SNAKE_CASE )
logger.warning(
f'''{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'''
f''' {positive_arg}={kwargs[positive_arg]}''' )
__UpperCAmelCase =kwargs.pop("""tpu_name""" , self.tpu_name )
__UpperCAmelCase =kwargs.pop("""device_idx""" , self.device_idx )
__UpperCAmelCase =kwargs.pop("""eager_mode""" , self.eager_mode )
__UpperCAmelCase =kwargs.pop("""use_xla""" , self.use_xla )
super().__init__(**__SCREAMING_SNAKE_CASE )
lowerCamelCase : str = field(
default=UpperCamelCase , metadata={'help': 'Name of TPU'} , )
lowerCamelCase : int = field(
default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , )
lowerCamelCase : bool = field(default=UpperCamelCase , metadata={'help': 'Benchmark models in eager model.'} )
lowerCamelCase : bool = field(
default=UpperCamelCase , metadata={
'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'
} , )
@cached_property
def _a ( self : List[str] ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ["""tf"""] )
__UpperCAmelCase =None
if self.tpu:
try:
if self.tpu_name:
__UpperCAmelCase =tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
__UpperCAmelCase =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
__UpperCAmelCase =None
return tpu
@cached_property
    def _setup_strategy( self : Tuple ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self , ["""tf"""] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            strategy =tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
                strategy =tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' )
            else:
                tf.config.set_visible_devices([] , """GPU""" ) # disable GPU
                strategy =tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' )
        return strategy
    @property
    def is_tpu( self : Optional[Any] ) -> bool:
        requires_backends(self , ["""tf"""] )
        return self._setup_tpu is not None
    @property
    def strategy( self : str ) -> "tf.distribute.Strategy":
        requires_backends(self , ["""tf"""] )
        return self._setup_strategy
    @property
    def gpu_list( self : Dict ) -> Optional[int]:
        requires_backends(self , ["""tf"""] )
        return tf.config.list_physical_devices("""GPU""" )
    @property
    def n_gpu( self : List[str] ) -> int:
        requires_backends(self , ["""tf"""] )
        if self.cuda:
            return len(self.gpu_list )
        return 0
    @property
    def is_gpu( self : List[str] ) -> bool:
        return self.n_gpu > 0
| 68 | 1 |
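# Hedged sketch (assumes TensorFlow 2.x): the TPU -> single GPU -> CPU fallback that
# `_setup_strategy` above performs, as a standalone helper. Only documented
# tf.distribute / tf.config calls are used; the helper name is illustrative.
import tensorflow as tf

def pick_strategy(device_idx: int = 0) -> tf.distribute.Strategy:
    try:
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        return tf.distribute.TPUStrategy(resolver)
    except ValueError:
        pass  # no TPU reachable
    gpus = tf.config.list_physical_devices("GPU")
    if gpus:
        # Only a single GPU is supported, mirroring the class above.
        tf.config.set_visible_devices(gpus[device_idx], "GPU")
        return tf.distribute.OneDeviceStrategy(device=f"/gpu:{device_idx}")
    tf.config.set_visible_devices([], "GPU")  # run on CPU only
    return tf.distribute.OneDeviceStrategy(device=f"/cpu:{device_idx}")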
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((2_56, 2_56)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowercase__ ( A_: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if isinstance(A_ , torch.Tensor ):
return image
elif isinstance(A_ , PIL.Image.Image ):
__UpperCAmelCase =[image]
__UpperCAmelCase =[trans(img.convert("""RGB""" ) ) for img in image]
__UpperCAmelCase =torch.stack(A_ )
return image
class _A ( DiffusionPipeline ):
"""simple docstring"""
    def __init__( self : Tuple , unet : Optional[int] , scheduler : int ) -> Union[str, Any]:
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler =DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )
    def check_inputs( self : Union[str, Any] , strength : float ) -> Optional[int]:
        if strength < 0 or strength > 1:
            raise ValueError(f'''The value of strength should be in [0.0, 1.0] but is {strength}''' )
    def get_timesteps( self : Dict , num_inference_steps : List[Any] , strength : Any , device : Dict ) -> List[str]:
        # get the original timestep using init_timestep
        init_timestep =min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start =max(num_inference_steps - init_timestep , 0 )
        timesteps =self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents( self : Optional[int] , image : List[str] , timestep : Optional[int] , batch_size : Dict , dtype : int , device : Tuple , generator : Any=None ) -> Optional[Any]:
        if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}''' )
        image =image.to(device=device , dtype=dtype )
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        init_latents =image
        shape =init_latents.shape
        noise =randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        # get latents
        print("""add noise to latents at timestep""" , timestep )
        init_latents =self.scheduler.add_noise(init_latents , noise , timestep )
        latents =init_latents
        return latents
@torch.no_grad()
    def __call__( self : List[str] , image : Union[torch.FloatTensor, PIL.Image.Image] = None , strength : float = 0.8 , batch_size : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta : float = 0.0 , num_inference_steps : int = 50 , use_clipped_model_output : Optional[bool] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
        self.check_inputs(strength )
        # 2. Preprocess image
        image =preprocess(image )
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps , num_inference_steps =self.get_timesteps(num_inference_steps , strength , self.device )
        latent_timestep =timesteps[:1].repeat(batch_size )
        # 4. Prepare latent variables
        latents =self.prepare_latents(image , latent_timestep , batch_size , self.unet.dtype , self.device , generator )
        image =latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps ):
            # 1. predict noise model_output
            model_output =self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image =self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator , ).prev_sample
        image =(image / 2 + 0.5).clamp(0 , 1 )
        image =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image =self.numpy_to_pil(image )
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image )
| 68 |
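# Hedged sketch: the strength-based schedule truncation that `get_timesteps` above
# performs, shown as plain arithmetic on step counts (no scheduler needed).
def truncated_schedule(num_inference_steps: int, strength: float) -> tuple:
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return t_start, num_inference_steps - t_start

# With 50 steps and strength 0.8, the first 10 steps are skipped and 40 remain.
assert truncated_schedule(50, 0.8) == (10, 40)
assert truncated_schedule(50, 1.0) == (0, 50)  # strength 1.0 keeps the full schedule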
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _A ( unittest.TestCase ):
"""simple docstring"""
@property
    def dummy_uncond_unet( self : List[str] ) -> Dict:
        torch.manual_seed(0 )
        model =UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
        return model
def _a ( self : int ) -> Union[str, Any]:
        unet =self.dummy_uncond_unet
        scheduler =ScoreSdeVeScheduler()
        sde_ve =ScoreSdeVePipeline(unet=unet , scheduler=scheduler )
        sde_ve.to(torch_device )
        sde_ve.set_progress_bar_config(disable=None )
        generator =torch.manual_seed(0 )
        image =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=generator ).images
        generator =torch.manual_seed(0 )
        image_from_tuple =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=generator , return_dict=False )[
            0
        ]
        image_slice =image[0, -3:, -3:, -1]
        image_from_tuple_slice =image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[int] ) -> int:
__UpperCAmelCase ="""google/ncsnpp-church-256"""
__UpperCAmelCase =UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =ScoreSdeVeScheduler.from_pretrained(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =ScoreSdeVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
sde_ve.to(__SCREAMING_SNAKE_CASE )
sde_ve.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =torch.manual_seed(0 )
__UpperCAmelCase =sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__UpperCAmelCase =np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 68 | 1 |
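# Hedged sketch: the corner-slice tolerance check the tests above repeat, factored
# into one helper. Comparing a 3x3 corner of the last channel keeps the hard-coded
# expected values short while still catching output regressions.
import numpy as np

def assert_slice_close(image: np.ndarray, expected_slice: np.ndarray, tol: float = 1e-2) -> None:
    image_slice = image[0, -3:, -3:, -1]  # batch 0, bottom-right 3x3, last channel
    assert np.abs(image_slice.flatten() - expected_slice).max() < tol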
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__A = logging.get_logger(__name__)
class _A ( BaseImageProcessor ):
"""simple docstring"""
    model_input_names = ['pixel_values']
    def __init__( self : Optional[Any] , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_flip_channel_order : bool = True , **kwargs : List[Any] , ) -> None:
        super().__init__(**kwargs )
        size =size if size is not None else {"""shortest_edge""": 224}
        size =get_size_dict(size , default_to_square=False )
        crop_size =crop_size if crop_size is not None else {"""height""": 256, """width""": 256}
        crop_size =get_size_dict(crop_size , param_name="""crop_size""" )
        self.do_resize =do_resize
        self.size =size
        self.resample =resample
        self.do_rescale =do_rescale
        self.rescale_factor =rescale_factor
        self.do_center_crop =do_center_crop
        self.crop_size =crop_size
        self.do_flip_channel_order =do_flip_channel_order
    def resize( self : str , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PIL.Image.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Optional[Any] , ) -> np.ndarray:
        size =get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' )
        output_size =get_resize_output_image_size(image , size=size["""shortest_edge"""] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self : Optional[Any] , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : List[str] , ) -> np.ndarray:
        size =get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )
    def rescale( self : Tuple , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Tuple , ) -> Any:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def flip_channel_order( self : List[str] , image : np.ndarray , data_format : Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray:
        return flip_channel_order(image , data_format=data_format )
    def preprocess( self : Dict , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_rescale : bool = None , rescale_factor : float = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_flip_channel_order : bool = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs : Optional[Any] , ) -> PIL.Image.Image:
        do_resize =do_resize if do_resize is not None else self.do_resize
        resample =resample if resample is not None else self.resample
        do_rescale =do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor =rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop =do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order =(
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size =size if size is not None else self.size
        size =get_size_dict(size , default_to_square=False )
        crop_size =crop_size if crop_size is not None else self.crop_size
        crop_size =get_size_dict(crop_size , param_name="""crop_size""" )
        images =make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        # All transformations expect numpy arrays.
        images =[to_numpy_array(image ) for image in images]
        if do_resize:
            images =[self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images =[self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images =[self.rescale(image=image , scale=rescale_factor ) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images =[self.flip_channel_order(image=image ) for image in images]
        images =[to_channel_dimension_format(image , data_format ) for image in images]
        data ={"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self : Any , outputs : Dict , target_sizes : List[Tuple] = None ) -> Union[str, Any]:
        logits =outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    """Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
            if is_torch_tensor(target_sizes ):
                target_sizes =target_sizes.numpy()
            semantic_segmentation =[]
            for idx in range(len(logits ) ):
                resized_logits =torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=False )
                semantic_map =resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation =logits.argmax(dim=1 )
            semantic_segmentation =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 68 |
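# Hedged sketch: what resizing with size={"shortest_edge": 224} amounts to, assuming
# the usual aspect-ratio-preserving behaviour of `get_resize_output_image_size`
# (the exact rounding used by the library may differ slightly).
def shortest_edge_size(height: int, width: int, shortest_edge: int) -> tuple:
    scale = shortest_edge / min(height, width)
    return round(height * scale), round(width * scale)

assert shortest_edge_size(480, 640, 224) == (224, 299)  # 640 * 224 / 480 = 298.67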
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer ( Trainer ):
"""simple docstring"""
    def __init__( self : List[str] , config : Union[str, Any]=None , data_args : str=None , *args : Union[str, Any] , **kwargs : List[Any] ) -> Any:
        super().__init__(*args , **kwargs )
        if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f''' {self.model.__class__}'''
            )
            self.config =self.model.config
        else:
            self.config =config
        self.data_args =data_args
        self.vocab_size =self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
                """ padding.""" )
        if self.args.label_smoothing == 0:
            self.loss_fn =torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            self.loss_fn =label_smoothed_nll_loss
    def create_optimizer_and_scheduler( self : Any , num_training_steps : int ) -> Any:
        if self.optimizer is None:
            no_decay =["""bias""", """LayerNorm.weight"""]
            optimizer_grouped_parameters =[
                {
                    """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    """weight_decay""": self.args.weight_decay,
                },
                {
                    """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    """weight_decay""": 0.0,
                },
            ]
            optimizer_cls =Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls =Adafactor
                optimizer_kwargs ={"""scale_parameter""": False, """relative_step""": False}
            else:
                optimizer_cls =AdamW
                optimizer_kwargs ={
                    """betas""": (self.args.adam_beta1, self.args.adam_beta2),
                    """eps""": self.args.adam_epsilon,
                }
            optimizer_kwargs["""lr"""] =self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer =OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer =optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler =self._get_lr_scheduler(num_training_steps )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
    def _get_lr_scheduler( self : Optional[Any] , num_training_steps : Optional[int] ) -> Any:
        schedule_func =arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler =schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler =schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler =schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
        return scheduler
    def _get_train_sampler( self : Optional[Any] ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss( self : List[Any] , model : List[str] , inputs : List[str] , labels : Optional[Any] ) -> Tuple:
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits =model(**inputs , use_cache=False )[0]
                loss =self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss , logits =model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits =model(**inputs , use_cache=False )[0]
            lprobs =torch.nn.functional.log_softmax(logits , dim=-1 )
            loss , _ =self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits
    def compute_loss( self : Optional[Any] , model : Union[str, Any] , inputs : List[Any] ) -> Dict:
        labels =inputs.pop("""labels""" )
        loss , _ =self._compute_loss(model , inputs , labels )
        return loss
    def prediction_step( self : List[str] , model : nn.Module , inputs : Dict[str, Union[torch.Tensor, Any]] , prediction_loss_only : bool , ignore_keys : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs =self._prepare_inputs(inputs )
        gen_kwargs ={
            """max_length""": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens =self.model.generate(
                inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **gen_kwargs , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens =self._pad_tensors_to_max_len(generated_tokens , gen_kwargs["""max_length"""] )
        labels =inputs.pop("""labels""" )
        with torch.no_grad():
            # compute loss on predict data
            loss , logits =self._compute_loss(model , inputs , labels )
        loss =loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits =generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels =self._pad_tensors_to_max_len(labels , gen_kwargs["""max_length"""] )
        return (loss, logits, labels)
    def _pad_tensors_to_max_len( self : Optional[Any] , tensor : Union[str, Any] , max_length : int ) -> List[Any]:
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id =self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
                f''' padded to `max_length`={max_length}''' )
        padded_tensor =pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] =tensor
        return padded_tensor
| 68 | 1 |
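# Hedged sketch: the right-padding performed by `_pad_tensors_to_max_len` above,
# on a concrete tensor (pad_token_id=0 is illustrative).
import torch

tensor = torch.tensor([[5, 6, 7], [8, 9, 10]])
max_length, pad_token_id = 5, 0
padded = pad_token_id * torch.ones((tensor.shape[0], max_length), dtype=tensor.dtype)
padded[:, : tensor.shape[-1]] = tensor
assert padded.tolist() == [[5, 6, 7, 0, 0], [8, 9, 10, 0, 0]]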
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.866_0254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate( initial_vectors: list[numpy.ndarray] , steps: int ) -> list[numpy.ndarray]:
    """simple docstring"""
    vectors =initial_vectors
    for _ in range(steps ):
        vectors =iteration_step(vectors )
    return vectors
def iteration_step( vectors: list[numpy.ndarray] ) -> list[numpy.ndarray]:
    """simple docstring"""
    new_vectors =[]
    for i, start_vector in enumerate(vectors[:-1] ):
        end_vector =vectors[i + 1]
        new_vectors.append(start_vector )
        difference_vector =end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3 )
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
        new_vectors.append(start_vector + difference_vector * 2 / 3 )
    new_vectors.append(vectors[-1] )
    return new_vectors
def rotate( vector: numpy.ndarray , angle_in_degrees: float ) -> numpy.ndarray:
    """simple docstring"""
    theta =numpy.radians(angle_in_degrees )
    c , s =numpy.cos(theta ), numpy.sin(theta )
    rotation_matrix =numpy.array(((c, -s), (s, c)) )
    return numpy.dot(rotation_matrix , vector )
def plot( vectors: list[numpy.ndarray] ) -> None:
    """simple docstring"""
    axes =plt.gca()
    axes.set_aspect("""equal""" )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates , y_coordinates =zip(*vectors )
    plt.plot(x_coordinates , y_coordinates )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 68 |
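# Hedged sketch: each Koch iteration replaces every segment with four, so k iterations
# of the closed 3-segment triangle yield 3 * 4**k segments, i.e. one more point.
def koch_point_count(k: int, initial_segments: int = 3) -> int:
    return initial_segments * 4**k + 1

assert koch_point_count(0) == 4     # the four points of INITIAL_VECTORS
assert koch_point_count(5) == 3073  # matches len(iterate(INITIAL_VECTORS, 5))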
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _A ( OnnxPipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    hub_checkpoint = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
    def get_dummy_inputs( self : Optional[int] , seed : int=0 ) -> Any:
        image =floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator =np.random.RandomState(seed )
        inputs ={
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""strength""": 0.75,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self : Optional[Any] ) -> int:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _a ( self : Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : Optional[Any] ) -> Dict:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
# warmup pass to apply optimizations
__UpperCAmelCase =pipe(**self.get_dummy_inputs() )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : List[Any] ) -> List[str]:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self : Union[str, Any] ) -> Dict:
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.get_dummy_inputs()
__UpperCAmelCase =pipe(**__SCREAMING_SNAKE_CASE ).images
__UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase =np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _A ( unittest.TestCase ):
"""simple docstring"""
@property
def _a ( self : List[str] ) -> Optional[int]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _a ( self : Dict ) -> int:
__UpperCAmelCase =ort.SessionOptions()
__UpperCAmelCase =False
return options
def _a ( self : Dict ) -> Any:
__UpperCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__UpperCAmelCase =init_image.resize((768, 512) )
# using the PNDM scheduler by default
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ="""A fantasy landscape, trending on artstation"""
__UpperCAmelCase =np.random.RandomState(0 )
__UpperCAmelCase =pipe(
prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , )
__UpperCAmelCase =output.images
__UpperCAmelCase =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__UpperCAmelCase =np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _a ( self : List[str] ) -> str:
__UpperCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__UpperCAmelCase =init_image.resize((768, 512) )
__UpperCAmelCase =LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__UpperCAmelCase =OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ="""A fantasy landscape, trending on artstation"""
__UpperCAmelCase =np.random.RandomState(0 )
__UpperCAmelCase =pipe(
prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , )
__UpperCAmelCase =output.images
__UpperCAmelCase =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__UpperCAmelCase =np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 68 | 1 |
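# Hedged sketch (assumes onnxruntime is installed): the provider/session configuration
# the GPU tests above build. Which SessionOptions flag the original disables is
# obfuscated; `enable_mem_pattern` is a plausible reconstruction, not a certainty.
import onnxruntime as ort

options = ort.SessionOptions()
options.enable_mem_pattern = False
gpu_provider = (
    "CUDAExecutionProvider",
    {"gpu_mem_limit": "15000000000", "arena_extend_strategy": "kSameAsRequested"},
)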
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester :
"""simple docstring"""
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : List[str]=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=[1, 2, 1] , __SCREAMING_SNAKE_CASE : List[Any]=[2, 2, 4] , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Any=2.0 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : int=0.0 , __SCREAMING_SNAKE_CASE : Dict=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : Tuple=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Any=10 , __SCREAMING_SNAKE_CASE : Dict=8 , ) -> List[Any]:
__UpperCAmelCase =parent
__UpperCAmelCase =batch_size
__UpperCAmelCase =image_size
__UpperCAmelCase =patch_size
__UpperCAmelCase =num_channels
__UpperCAmelCase =embed_dim
__UpperCAmelCase =depths
__UpperCAmelCase =num_heads
__UpperCAmelCase =window_size
__UpperCAmelCase =mlp_ratio
__UpperCAmelCase =qkv_bias
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =drop_path_rate
__UpperCAmelCase =hidden_act
__UpperCAmelCase =use_absolute_embeddings
__UpperCAmelCase =patch_norm
__UpperCAmelCase =layer_norm_eps
__UpperCAmelCase =initializer_range
__UpperCAmelCase =is_training
__UpperCAmelCase =scope
__UpperCAmelCase =use_labels
__UpperCAmelCase =type_sequence_label_size
__UpperCAmelCase =encoder_stride
def _a ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase =None
if self.use_labels:
__UpperCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase =self.get_config()
return config, pixel_values, labels
def _a ( self : List[Any] ) -> Optional[Any]:
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _a ( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
__UpperCAmelCase =SwinvaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__UpperCAmelCase =int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _a ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Tuple:
__UpperCAmelCase =SwinvaForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__UpperCAmelCase =1
__UpperCAmelCase =SwinvaForMaskedImageModeling(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
__UpperCAmelCase =self.type_sequence_label_size
__UpperCAmelCase =SwinvaForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : List[str] ) -> Tuple:
__UpperCAmelCase =self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs
__UpperCAmelCase ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _A ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowerCamelCase : Tuple = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Dict = False
lowerCamelCase : Tuple = False
lowerCamelCase : List[str] = False
lowerCamelCase : Tuple = False
def _a ( self : str ) -> str:
__UpperCAmelCase =SwinvaModelTester(self )
__UpperCAmelCase =ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , embed_dim=37 )
def _a ( self : List[Any] ) -> Optional[int]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : str ) -> str:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def _a ( self : Tuple ) -> Tuple:
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def _a ( self : Optional[Any] ) -> int:
pass
def _a ( self : Tuple ) -> int:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def _a ( self : str ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase =[*signature.parameters.keys()]
__UpperCAmelCase =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =True
for model_class in self.all_model_classes:
__UpperCAmelCase =True
__UpperCAmelCase =False
__UpperCAmelCase =True
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
__UpperCAmelCase =len(self.model_tester.depths )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase =True
__UpperCAmelCase =config.window_size**2
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
__UpperCAmelCase =len(__SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__UpperCAmelCase =True
__UpperCAmelCase =True
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
if hasattr(self.model_tester , """num_hidden_states_types""" ):
__UpperCAmelCase =self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__UpperCAmelCase =2
self.assertEqual(out_len + added_hidden_states , len(__SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> int:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.hidden_states
__UpperCAmelCase =getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# Swinv2 has a different seq_length
__UpperCAmelCase =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__UpperCAmelCase =outputs.reshaped_hidden_states
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =reshaped_hidden_states[0].shape
__UpperCAmelCase =(
reshaped_hidden_states[0].view(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _a ( self : str ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _a ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =3
__UpperCAmelCase =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__UpperCAmelCase =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__UpperCAmelCase =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
def _a ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Dict:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def _a ( self : int ) -> Dict:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase =SwinvaModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def _a ( self : Dict ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =_config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(config=__SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class _A ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self : Tuple ) -> Dict:
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
    def _a ( self : int ) -> Optional[int]:
        model =SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
            torch_device )
        image_processor =self.default_image_processor
        image =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        inputs =image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs =model(**inputs )
        # verify the logits
        expected_shape =torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice =torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 68 |
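# Hedged sketch: the expected-shape arithmetic the Swin test above relies on. Each of
# the len(depths) - 1 downsampling stages merges 2x2 patches, quartering the sequence
# length and doubling the embedding dimension.
def swin_output_shape(image_size: int, patch_size: int, embed_dim: int, num_stages: int) -> tuple:
    seq_len = ((image_size // patch_size) ** 2) // (4 ** (num_stages - 1))
    hidden_dim = int(embed_dim * 2 ** (num_stages - 1))
    return seq_len, hidden_dim

# Tester defaults above: image 32, patch 2, embed_dim 16, depths [1, 2, 1] (3 stages).
assert swin_output_shape(32, 2, 16, 3) == (16, 64)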
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer ( BaseTransformer ):
"""simple docstring"""
    mode = 'sequence-classification'
    def __init__( self : Dict , hparams : Tuple ) -> Optional[Any]:
        if type(hparams ) == dict:
            hparams =Namespace(**hparams )
        hparams.glue_output_mode =glue_output_modes[hparams.task]
        num_labels =glue_tasks_num_labels[hparams.task]
        super().__init__(hparams , num_labels , self.mode )
    def forward( self : str , **inputs : Dict ) -> List[str]:
        return self.model(**inputs )
    def training_step( self : Tuple , batch : Union[str, Any] , batch_idx : Dict ) -> List[Any]:
        inputs ={"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["""token_type_ids"""] =batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
        outputs =self(**inputs )
        loss =outputs[0]
        lr_scheduler =self.trainer.lr_schedulers[0]["""scheduler"""]
        tensorboard_logs ={"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data( self : Tuple ) -> List[Any]:
        args =self.hparams
        processor =processors[args.task]()
        self.labels =processor.get_labels()
        for mode in ["train", "dev"]:
            cached_features_file =self._feature_file(mode )
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                logger.info("""Loading features from cached file %s""" , cached_features_file )
            else:
                logger.info("""Creating features from dataset file at %s""" , args.data_dir )
                examples =(
                    processor.get_dev_examples(args.data_dir )
                    if mode == """dev"""
                    else processor.get_train_examples(args.data_dir )
                )
                features =convert_examples_to_features(
                    examples , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
                logger.info("""Saving features into cached file %s""" , cached_features_file )
                torch.save(features , cached_features_file )
    def get_dataloader( self : List[str] , mode : str , batch_size : int , shuffle : bool = False ) -> DataLoader:
        mode ="""dev""" if mode == """test""" else mode
        cached_features_file =self._feature_file(mode )
        logger.info("""Loading features from cached file %s""" , cached_features_file )
        features =torch.load(cached_features_file )
        all_input_ids =torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        all_attention_mask =torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        all_token_type_ids =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        if self.hparams.glue_output_mode == "classification":
            all_labels =torch.tensor([f.label for f in features] , dtype=torch.long )
        elif self.hparams.glue_output_mode == "regression":
            all_labels =torch.tensor([f.label for f in features] , dtype=torch.float )
        return DataLoader(
            TensorDataset(all_input_ids , all_attention_mask , all_token_type_ids , all_labels ) , batch_size=batch_size , shuffle=shuffle , )
    def validation_step( self : Any , batch : Optional[Any] , batch_idx : int ) -> str:
        inputs ={"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["""token_type_ids"""] =batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
        outputs =self(**inputs )
        tmp_eval_loss , logits =outputs[:2]
        preds =logits.detach().cpu().numpy()
        out_label_ids =inputs["""labels"""].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end( self : Tuple , outputs : Any ) -> tuple:
        val_loss_mean =torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
        preds =np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
        if self.hparams.glue_output_mode == "classification":
            preds =np.argmax(preds , axis=1 )
        elif self.hparams.glue_output_mode == "regression":
            preds =np.squeeze(preds )
        out_label_ids =np.concatenate([x["""target"""] for x in outputs] , axis=0 )
        preds_list =[[] for _ in range(out_label_ids.shape[0] )]
        out_label_list =[[] for _ in range(out_label_ids.shape[0] )]
        results ={**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , preds , out_label_ids )}
        ret =dict(results.items() )
        ret["""log"""] =results
        return ret, preds_list, out_label_list
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : list ) -> dict:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._eval_end(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> dict:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._eval_end(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _a ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]:
BaseTransformer.add_model_specific_args(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=__SCREAMING_SNAKE_CASE , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--task""" , default="""""" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The GLUE task to run""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=__SCREAMING_SNAKE_CASE , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
def lowercase__ ( ) -> str:
"""simple docstring"""
__UpperCAmelCase =argparse.ArgumentParser()
add_generic_args(A_ , os.getcwd() )
__UpperCAmelCase =GLUETransformer.add_model_specific_args(A_ , os.getcwd() )
__UpperCAmelCase =parser.parse_args()
# If no output_dir is provided, create a timestamped folder under ./results in the current working directory
if args.output_dir is None:
__UpperCAmelCase =os.path.join(
"""./results""" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
os.makedirs(args.output_dir )
__UpperCAmelCase =GLUETransformer(A_ )
__UpperCAmelCase =generic_train(A_ , A_ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
__UpperCAmelCase =sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=A_ ) )
__UpperCAmelCase =model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(A_ )
if __name__ == "__main__":
main()
| 68 | 1 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__A = None
__A = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1", whose values are not preserved correctly when saving and loading an image
__A = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class _A :
"""simple docstring"""
lowerCamelCase : bool = True
lowerCamelCase : Optional[str] = None
# Automatically constructed
lowerCamelCase : ClassVar[str] = "PIL.Image.Image"
lowerCamelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
lowerCamelCase : str = field(default='Image' , init=UpperCamelCase , repr=UpperCamelCase )
def __call__( self : Tuple ) -> int:
return self.pa_type
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ) -> dict:
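# Normalize any supported input (path string, raw bytes, numpy array or PIL image) into the {"bytes", "path"} dict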
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__UpperCAmelCase =np.array(__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return {"path": value, "bytes": None}
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return {"path": None, "bytes": value}
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(__SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(__SCREAMING_SNAKE_CASE )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def _a ( self : str , __SCREAMING_SNAKE_CASE : dict , __SCREAMING_SNAKE_CASE : str=None ) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
if token_per_repo_id is None:
__UpperCAmelCase ={}
__UpperCAmelCase , __UpperCAmelCase =value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(__SCREAMING_SNAKE_CASE ):
__UpperCAmelCase =PIL.Image.open(__SCREAMING_SNAKE_CASE )
else:
__UpperCAmelCase =path.split("""::""" )[-1]
try:
__UpperCAmelCase =string_to_dict(__SCREAMING_SNAKE_CASE , config.HUB_DATASETS_URL )["""repo_id"""]
__UpperCAmelCase =token_per_repo_id.get(__SCREAMING_SNAKE_CASE )
except ValueError:
__UpperCAmelCase =None
with xopen(__SCREAMING_SNAKE_CASE , """rb""" , use_auth_token=__SCREAMING_SNAKE_CASE ) as f:
__UpperCAmelCase =BytesIO(f.read() )
__UpperCAmelCase =PIL.Image.open(bytes_ )
else:
__UpperCAmelCase =PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def _a ( self : Union[str, Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[pa.StringArray, pa.StructArray, pa.ListArray] ) -> pa.StructArray:
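# Cast the incoming Arrow array (string paths, raw binary, structs or nested lists) to the bytes/path struct type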
if pa.types.is_string(storage.type ):
__UpperCAmelCase =pa.array([None] * len(__SCREAMING_SNAKE_CASE ) , type=pa.binary() )
__UpperCAmelCase =pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__UpperCAmelCase =pa.array([None] * len(__SCREAMING_SNAKE_CASE ) , type=pa.string() )
__UpperCAmelCase =pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
__UpperCAmelCase =storage.field("""bytes""" )
else:
__UpperCAmelCase =pa.array([None] * len(__SCREAMING_SNAKE_CASE ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
__UpperCAmelCase =storage.field("""path""" )
else:
__UpperCAmelCase =pa.array([None] * len(__SCREAMING_SNAKE_CASE ) , type=pa.string() )
__UpperCAmelCase =pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
__UpperCAmelCase =pa.array(
[encode_np_array(np.array(__SCREAMING_SNAKE_CASE ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
__UpperCAmelCase =pa.array([None] * len(__SCREAMING_SNAKE_CASE ) , type=pa.string() )
__UpperCAmelCase =pa.StructArray.from_arrays(
[bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(__SCREAMING_SNAKE_CASE , self.pa_type )
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : pa.StructArray ) -> pa.StructArray:
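# Read any referenced files and embed their raw bytes so the table is self-contained; keep only the basename as path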
@no_op_if_value_is_null
def path_to_bytes(__SCREAMING_SNAKE_CASE : Optional[int] ):
with xopen(__SCREAMING_SNAKE_CASE , """rb""" ) as f:
__UpperCAmelCase =f.read()
return bytes_
__UpperCAmelCase =pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
__UpperCAmelCase =pa.array(
[os.path.basename(__SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
__UpperCAmelCase =pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(__SCREAMING_SNAKE_CASE , self.pa_type )
def lowercase__ ( ) -> List[str]:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
__UpperCAmelCase =list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def lowercase__ ( A_: "PIL.Image.Image" ) -> bytes:
"""simple docstring"""
__UpperCAmelCase =BytesIO()
if image.format in list_image_compression_formats():
__UpperCAmelCase =image.format
else:
__UpperCAmelCase ="""PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(A_ , format=A_ )
return buffer.getvalue()
def lowercase__ ( A_: "PIL.Image.Image" ) -> dict:
"""simple docstring"""
if hasattr(A_ , """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(A_ )}
def lowercase__ ( A_: np.ndarray ) -> dict:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
__UpperCAmelCase =array.dtype
__UpperCAmelCase =dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
__UpperCAmelCase =dtype.kind
__UpperCAmelCase =dtype.itemsize
__UpperCAmelCase =None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
__UpperCAmelCase =np.dtype("""|u1""" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
__UpperCAmelCase =dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
__UpperCAmelCase =dtype_byteorder + dtype_kind + str(A_ )
__UpperCAmelCase =np.dtype(A_ )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
__UpperCAmelCase =PIL.Image.fromarray(array.astype(A_ ) )
return {"path": None, "bytes": image_to_bytes(A_ )}
def lowercase__ ( A_: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if objs:
__UpperCAmelCase , __UpperCAmelCase =first_non_null_value(A_ )
if isinstance(A_ , A_ ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(A_ , np.ndarray ):
__UpperCAmelCase =no_op_if_value_is_null(A_ )
return [obj_to_image_dict_func(A_ ) for obj in objs]
elif isinstance(A_ , PIL.Image.Image ):
__UpperCAmelCase =no_op_if_value_is_null(A_ )
return [obj_to_image_dict_func(A_ ) for obj in objs]
else:
return objs
else:
return objs
| 68 |
def lowercase__ ( A_: int , A_: int ) -> int:
"""simple docstring"""
return 1 if input_a == input_a else 0
def lowercase__ ( ) -> None:
"""simple docstring"""
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 68 | 1 |
import math
import unittest
def lowercase__ ( A_: int ) -> bool:
"""simple docstring"""
assert isinstance(A_ , A_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(A_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[int] ) -> Optional[int]:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def _a ( self : Dict ) -> List[str]:
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 68 |
from __future__ import annotations
import bisect
def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> int:
"""simple docstring"""
if hi < 0:
__UpperCAmelCase =len(A_ )
while lo < hi:
__UpperCAmelCase =lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__UpperCAmelCase =mid + 1
else:
__UpperCAmelCase =mid
return lo
def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> int:
"""simple docstring"""
if hi < 0:
__UpperCAmelCase =len(A_ )
while lo < hi:
__UpperCAmelCase =lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__UpperCAmelCase =mid + 1
else:
__UpperCAmelCase =mid
return lo
def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> None:
"""simple docstring"""
sorted_collection.insert(bisect_left(A_ , A_ , A_ , A_ ) , A_ )
def lowercase__ ( A_: list[int] , A_: int , A_: int = 0 , A_: int = -1 ) -> None:
"""simple docstring"""
sorted_collection.insert(bisect_right(A_ , A_ , A_ , A_ ) , A_ )
def lowercase__ ( A_: list[int] , A_: int ) -> int | None:
"""simple docstring"""
__UpperCAmelCase =0
__UpperCAmelCase =len(A_ ) - 1
while left <= right:
__UpperCAmelCase =left + (right - left) // 2
__UpperCAmelCase =sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__UpperCAmelCase =midpoint - 1
else:
__UpperCAmelCase =midpoint + 1
return None
def lowercase__ ( A_: list[int] , A_: int ) -> int | None:
"""simple docstring"""
__UpperCAmelCase =bisect.bisect_left(A_ , A_ )
if index != len(A_ ) and sorted_collection[index] == item:
return index
return None
def lowercase__ ( A_: list[int] , A_: int , A_: int , A_: int ) -> int | None:
"""simple docstring"""
if right < left:
return None
__UpperCAmelCase =left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(A_ , A_ , A_ , midpoint - 1 )
else:
return binary_search_by_recursion(A_ , A_ , midpoint + 1 , A_ )
if __name__ == "__main__":
__A = input("Enter numbers separated by comma:\n").strip()
__A = sorted(int(item) for item in user_input.split(","))
__A = int(input("Enter a single number to be found in the list:\n"))
__A = binary_search(collection, target)
if result is None:
print(F"""{target} was not found in {collection}.""")
else:
print(F"""{target} was found at position {result} in {collection}.""")
| 68 | 1 |
from __future__ import annotations
def lowercase__ ( A_: list[list[int]] ) -> int:
"""simple docstring"""
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(A_ ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(A_ ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 68 |
from typing import List
from .keymap import KEYMAP, get_character
def lowercase__ ( A_: str ) -> str:
"""simple docstring"""
def decorator(A_: int ):
__UpperCAmelCase =getattr(A_ , """handle_key""" , [] )
handle += [key]
setattr(A_ , """handle_key""" , A_ )
return func
return decorator
def lowercase__ ( *A_: List[str] ) -> Optional[int]:
"""simple docstring"""
def decorator(A_: Tuple ):
__UpperCAmelCase =getattr(A_ , """handle_key""" , [] )
handle += keys
setattr(A_ , """handle_key""" , A_ )
return func
return decorator
class _A ( UpperCamelCase ):
"""simple docstring"""
def __new__( cls : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> int:
__UpperCAmelCase =super().__new__(cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if not hasattr(__SCREAMING_SNAKE_CASE , """key_handler""" ):
setattr(__SCREAMING_SNAKE_CASE , """key_handler""" , {} )
setattr(__SCREAMING_SNAKE_CASE , """handle_input""" , KeyHandler.handle_input )
for value in attrs.values():
__UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , """handle_key""" , [] )
for key in handled_keys:
__UpperCAmelCase =value
return new_cls
@staticmethod
def _a ( cls : Dict ) -> List[Any]:
__UpperCAmelCase =get_character()
if char != KEYMAP["undefined"]:
__UpperCAmelCase =ord(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =cls.key_handler.get(__SCREAMING_SNAKE_CASE )
if handler:
__UpperCAmelCase =char
return handler(cls )
else:
return None
def lowercase__ ( cls: str ) -> int:
"""simple docstring"""
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 68 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
__A = (7_20, 12_80) # Height, Width
__A = (0.4, 0.6) # if height or width is lower than this scale, drop it.
__A = 1 / 1_00
__A = ""
__A = ""
__A = ""
__A = 2_50
def lowercase__ ( ) -> None:
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase =get_dataset(A_ , A_ )
for index in range(A_ ):
__UpperCAmelCase =random.sample(range(len(A_ ) ) , 4 )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =update_image_and_anno(
A_ , A_ , A_ , A_ , A_ , filter_scale=A_ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__UpperCAmelCase =random_chars(32 )
__UpperCAmelCase =path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
__UpperCAmelCase =F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(F'''{file_root}.jpg''' , A_ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
__UpperCAmelCase =[]
for anno in new_annos:
__UpperCAmelCase =anno[3] - anno[1]
__UpperCAmelCase =anno[4] - anno[2]
__UpperCAmelCase =anno[1] + width / 2
__UpperCAmelCase =anno[2] + height / 2
__UpperCAmelCase =F'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(A_ )
with open(F'''{file_root}.txt''' , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def lowercase__ ( A_: str , A_: str ) -> tuple[list, list]:
"""simple docstring"""
__UpperCAmelCase =[]
__UpperCAmelCase =[]
for label_file in glob.glob(os.path.join(A_ , """*.txt""" ) ):
__UpperCAmelCase =label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(A_ ) as in_file:
__UpperCAmelCase =in_file.readlines()
__UpperCAmelCase =os.path.join(A_ , F'''{label_name}.jpg''' )
__UpperCAmelCase =[]
for obj_list in obj_lists:
__UpperCAmelCase =obj_list.rstrip("""\n""" ).split(""" """ )
__UpperCAmelCase =float(obj[1] ) - float(obj[3] ) / 2
__UpperCAmelCase =float(obj[2] ) - float(obj[4] ) / 2
__UpperCAmelCase =float(obj[1] ) + float(obj[3] ) / 2
__UpperCAmelCase =float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(A_ )
labels.append(A_ )
return img_paths, labels
def lowercase__ ( A_: list , A_: list , A_: list[int] , A_: tuple[int, int] , A_: tuple[float, float] , A_: float = 0.0 , ) -> tuple[list, list, str]:
"""simple docstring"""
__UpperCAmelCase =np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
__UpperCAmelCase =scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__UpperCAmelCase =scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__UpperCAmelCase =int(scale_x * output_size[1] )
__UpperCAmelCase =int(scale_y * output_size[0] )
__UpperCAmelCase =[]
__UpperCAmelCase =[]
for i, index in enumerate(A_ ):
__UpperCAmelCase =all_img_list[index]
path_list.append(A_ )
__UpperCAmelCase =all_annos[index]
__UpperCAmelCase =cva.imread(A_ )
if i == 0: # top-left
__UpperCAmelCase =cva.resize(A_ , (divid_point_x, divid_point_y) )
__UpperCAmelCase =img
for bbox in img_annos:
__UpperCAmelCase =bbox[1] * scale_x
__UpperCAmelCase =bbox[2] * scale_y
__UpperCAmelCase =bbox[3] * scale_x
__UpperCAmelCase =bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
__UpperCAmelCase =cva.resize(A_ , (output_size[1] - divid_point_x, divid_point_y) )
__UpperCAmelCase =img
for bbox in img_annos:
__UpperCAmelCase =scale_x + bbox[1] * (1 - scale_x)
__UpperCAmelCase =bbox[2] * scale_y
__UpperCAmelCase =scale_x + bbox[3] * (1 - scale_x)
__UpperCAmelCase =bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
__UpperCAmelCase =cva.resize(A_ , (divid_point_x, output_size[0] - divid_point_y) )
__UpperCAmelCase =img
for bbox in img_annos:
__UpperCAmelCase =bbox[1] * scale_x
__UpperCAmelCase =scale_y + bbox[2] * (1 - scale_y)
__UpperCAmelCase =bbox[3] * scale_x
__UpperCAmelCase =scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
__UpperCAmelCase =cva.resize(
A_ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
__UpperCAmelCase =img
for bbox in img_annos:
__UpperCAmelCase =scale_x + bbox[1] * (1 - scale_x)
__UpperCAmelCase =scale_y + bbox[2] * (1 - scale_y)
__UpperCAmelCase =scale_x + bbox[3] * (1 - scale_x)
__UpperCAmelCase =scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
__UpperCAmelCase =[
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def lowercase__ ( A_: int ) -> str:
"""simple docstring"""
assert number_char > 1, "The number of characters should be greater than 1"
__UpperCAmelCase =ascii_lowercase + digits
return "".join(random.choice(A_ ) for _ in range(A_ ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 68 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 68 | 1 |
import math
def lowercase__ ( A_: int ) -> bool:
"""simple docstring"""
assert isinstance(A_ , A_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
__UpperCAmelCase =range(3 , int(math.sqrt(A_ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def lowercase__ ( A_: Any , A_: Optional[int]=1 , **A_: int ) -> int:
"""simple docstring"""
__UpperCAmelCase =factor * value
__UpperCAmelCase =value
while not is_prime(A_ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **A_ )
return value
| 68 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class _A ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[Any]=99 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : int=5 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : str=37 , __SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : str=512 , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : List[str]=4 , ) -> Optional[Any]:
__UpperCAmelCase =parent
__UpperCAmelCase =batch_size
__UpperCAmelCase =seq_length
__UpperCAmelCase =is_training
__UpperCAmelCase =use_attention_mask
__UpperCAmelCase =use_token_type_ids
__UpperCAmelCase =use_labels
__UpperCAmelCase =vocab_size
__UpperCAmelCase =hidden_size
__UpperCAmelCase =num_hidden_layers
__UpperCAmelCase =num_attention_heads
__UpperCAmelCase =intermediate_size
__UpperCAmelCase =hidden_act
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =max_position_embeddings
__UpperCAmelCase =type_vocab_size
__UpperCAmelCase =type_sequence_label_size
__UpperCAmelCase =initializer_range
__UpperCAmelCase =num_choices
def _a ( self : List[Any] ) -> List[str]:
__UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase =None
if self.use_attention_mask:
__UpperCAmelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase =None
if self.use_token_type_ids:
__UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase =RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _a ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase =self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs
__UpperCAmelCase ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def _a ( self : List[str] ) -> Dict:
__UpperCAmelCase =self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs
__UpperCAmelCase =True
__UpperCAmelCase =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class _A ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : Union[str, Any] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _a ( self : List[Any] ) -> List[str]:
__UpperCAmelCase =FlaxRobertaModelTester(self )
@slow
def _a ( self : Optional[Any] ) -> List[Any]:
for model_class_name in self.all_model_classes:
__UpperCAmelCase =model_class_name.from_pretrained("""roberta-base""" , from_pt=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model(np.ones((1, 1) ) )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
| 68 | 1 |
import torch
from transformers import AutoModel
class _A ( torch.nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : str="sayef/fsner-bert-base-uncased" ) -> Optional[int]:
super(__SCREAMING_SNAKE_CASE , self ).__init__()
__UpperCAmelCase =AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =torch.nn.CosineSimilarity(3 , 1e-08 )
__UpperCAmelCase =torch.nn.Softmax(dim=1 )
def _a ( self : str , **__SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Dict ) -> str:
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str=1 ) -> Optional[int]:
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str ) -> Any:
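# Few-shot forward pass: score each query token against the start/end token embeddings gathered from the support set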
__UpperCAmelCase =W_supports["""sizes"""].tolist()
__UpperCAmelCase =W_supports["""start_token_id"""].item()
__UpperCAmelCase =W_supports["""end_token_id"""].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
__UpperCAmelCase =self.BERT(**__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.BERT(**__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =W_supports["""input_ids"""] == start_token_id
__UpperCAmelCase =W_supports["""input_ids"""] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
__UpperCAmelCase =0
else:
__UpperCAmelCase =support_sizes[i - 1]
__UpperCAmelCase =S[s : s + size][start_token_masks[s : s + size]]
__UpperCAmelCase =S[s : s + size][end_token_masks[s : s + size]]
__UpperCAmelCase =torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
__UpperCAmelCase =torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
__UpperCAmelCase =torch.vstack((p_starts, p_start) )
__UpperCAmelCase =torch.vstack((p_ends, p_end) )
else:
__UpperCAmelCase =p_start
__UpperCAmelCase =p_end
return p_starts, p_ends
| 68 |
from __future__ import annotations
def lowercase__ ( A_: list[list[int]] ) -> int:
"""simple docstring"""
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(A_ ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(A_ ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 68 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__A = logging.get_logger(__name__)
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = ['pixel_values']
def __init__( self : str , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : bool = True , **__SCREAMING_SNAKE_CASE : Tuple , ) -> None:
super().__init__(**__SCREAMING_SNAKE_CASE )
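# BLIP defaults: 384x384 inputs normalized with the OpenAI CLIP mean/std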
__UpperCAmelCase =size if size is not None else {"""height""": 384, """width""": 384}
__UpperCAmelCase =get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =do_resize
__UpperCAmelCase =size
__UpperCAmelCase =resample
__UpperCAmelCase =do_rescale
__UpperCAmelCase =rescale_factor
__UpperCAmelCase =do_normalize
__UpperCAmelCase =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__UpperCAmelCase =image_std if image_std is not None else OPENAI_CLIP_STD
__UpperCAmelCase =do_convert_rgb
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Dict[str, int] , __SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Dict , ) -> np.ndarray:
__UpperCAmelCase =get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
__UpperCAmelCase =(size["""height"""], size["""width"""])
return resize(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Union[int, float] , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Any , ) -> Dict:
return rescale(__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Union[float, List[float]] , __SCREAMING_SNAKE_CASE : Union[float, List[float]] , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Any , ) -> np.ndarray:
return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _a ( self : int , __SCREAMING_SNAKE_CASE : ImageInput , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[Dict[str, int]] = None , __SCREAMING_SNAKE_CASE : PILImageResampling = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[float] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : ChannelDimension = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE : List[str] , ) -> PIL.Image.Image:
__UpperCAmelCase =do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase =resample if resample is not None else self.resample
__UpperCAmelCase =do_rescale if do_rescale is not None else self.do_rescale
__UpperCAmelCase =rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCAmelCase =do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase =image_mean if image_mean is not None else self.image_mean
__UpperCAmelCase =image_std if image_std is not None else self.image_std
__UpperCAmelCase =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__UpperCAmelCase =size if size is not None else self.size
__UpperCAmelCase =get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =make_list_of_images(__SCREAMING_SNAKE_CASE )
if not valid_images(__SCREAMING_SNAKE_CASE ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__UpperCAmelCase =[convert_to_rgb(__SCREAMING_SNAKE_CASE ) for image in images]
# All transformations expect numpy arrays.
__UpperCAmelCase =[to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
__UpperCAmelCase =[self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
__UpperCAmelCase =[self.rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
__UpperCAmelCase =[self.normalize(image=__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE ) for image in images]
__UpperCAmelCase =[to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images]
__UpperCAmelCase =BatchFeature(data={"""pixel_values""": images} , tensor_type=__SCREAMING_SNAKE_CASE )
return encoded_outputs
| 68 |
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def lowercase__ ( A_: int , A_: int , A_: int , A_: int , A_: int , A_: int ) -> np.ndarray:
"""simple docstring"""
if (ksize % 2) == 0:
__UpperCAmelCase =ksize + 1
__UpperCAmelCase =np.zeros((ksize, ksize) , dtype=np.floataa )
# fill in each kernel value
for y in range(A_ ):
for x in range(A_ ):
# distance from center
__UpperCAmelCase =x - ksize // 2
__UpperCAmelCase =y - ksize // 2
# degrees to radians
__UpperCAmelCase =theta / 180 * np.pi
__UpperCAmelCase =np.cos(_theta )
__UpperCAmelCase =np.sin(_theta )
# get kernel x
__UpperCAmelCase =cos_theta * px + sin_theta * py
# get kernel y
__UpperCAmelCase =-sin_theta * px + cos_theta * py
# fill kernel
__UpperCAmelCase =np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__A = imread("../image_data/lena.jpg")
# convert the image to grayscale
__A = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple kernels to detect edges
__A = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
__A = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__A = out / out.max() * 2_55
__A = out.astype(np.uinta)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
| 68 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__A = logging.get_logger(__name__)
__A = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
__A = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
__A = {
"facebook/bart-base": 10_24,
"facebook/bart-large": 10_24,
"facebook/bart-large-mnli": 10_24,
"facebook/bart-large-cnn": 10_24,
"facebook/bart-large-xsum": 10_24,
"yjernite/bart_eli5": 10_24,
}
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : List[Any] = VOCAB_FILES_NAMES
lowerCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Union[str, Any] = ['input_ids', 'attention_mask']
lowerCamelCase : str = BartTokenizer
def __init__( self : int , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : List[Any]="replace" , __SCREAMING_SNAKE_CASE : str="<s>" , __SCREAMING_SNAKE_CASE : List[Any]="</s>" , __SCREAMING_SNAKE_CASE : Union[str, Any]="</s>" , __SCREAMING_SNAKE_CASE : Optional[Any]="<s>" , __SCREAMING_SNAKE_CASE : Optional[Any]="<unk>" , __SCREAMING_SNAKE_CASE : Dict="<pad>" , __SCREAMING_SNAKE_CASE : Optional[int]="<mask>" , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , **__SCREAMING_SNAKE_CASE : List[str] , ) -> Dict:
super().__init__(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , errors=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__UpperCAmelCase =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
__UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop("""type""" ) )
__UpperCAmelCase =add_prefix_space
__UpperCAmelCase =pre_tok_class(**__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__UpperCAmelCase ="""post_processor"""
__UpperCAmelCase =getattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if tokenizer_component_instance:
__UpperCAmelCase =json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the `post_processor_class` object
if "sep" in state:
__UpperCAmelCase =tuple(state["""sep"""] )
if "cls" in state:
__UpperCAmelCase =tuple(state["""cls"""] )
__UpperCAmelCase =False
if state.get("""add_prefix_space""" , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
__UpperCAmelCase =add_prefix_space
__UpperCAmelCase =True
if state.get("""trim_offsets""" , __SCREAMING_SNAKE_CASE ) != trim_offsets:
__UpperCAmelCase =trim_offsets
__UpperCAmelCase =True
if changes_to_apply:
__UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , state.pop("""type""" ) )
__UpperCAmelCase =component_class(**__SCREAMING_SNAKE_CASE )
setattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@property
def _a ( self : Any ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase =AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else value
__UpperCAmelCase =value
def _a ( self : Tuple , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> BatchEncoding:
__UpperCAmelCase =kwargs.get("""is_split_into_words""" , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _a ( self : List[str] , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : List[Any] ) -> BatchEncoding:
__UpperCAmelCase =kwargs.get("""is_split_into_words""" , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
__UpperCAmelCase =self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None ) -> List[Any]:
__UpperCAmelCase =[self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase =[self.sep_token_id]
__UpperCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 68 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _A :
"""simple docstring"""
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : List[str]=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=[1, 2, 1] , __SCREAMING_SNAKE_CASE : List[Any]=[2, 2, 4] , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Any=2.0 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : int=0.0 , __SCREAMING_SNAKE_CASE : Dict=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : Tuple=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Any=10 , __SCREAMING_SNAKE_CASE : Dict=8 , ) -> List[Any]:
__UpperCAmelCase =parent
__UpperCAmelCase =batch_size
__UpperCAmelCase =image_size
__UpperCAmelCase =patch_size
__UpperCAmelCase =num_channels
__UpperCAmelCase =embed_dim
__UpperCAmelCase =depths
__UpperCAmelCase =num_heads
__UpperCAmelCase =window_size
__UpperCAmelCase =mlp_ratio
__UpperCAmelCase =qkv_bias
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =drop_path_rate
__UpperCAmelCase =hidden_act
__UpperCAmelCase =use_absolute_embeddings
__UpperCAmelCase =patch_norm
__UpperCAmelCase =layer_norm_eps
__UpperCAmelCase =initializer_range
__UpperCAmelCase =is_training
__UpperCAmelCase =scope
__UpperCAmelCase =use_labels
__UpperCAmelCase =type_sequence_label_size
__UpperCAmelCase =encoder_stride
def _a ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase =None
if self.use_labels:
__UpperCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase =self.get_config()
return config, pixel_values, labels
def _a ( self : List[Any] ) -> Optional[Any]:
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _a ( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
__UpperCAmelCase =SwinvaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__UpperCAmelCase =int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _a ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Tuple:
__UpperCAmelCase =SwinvaForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__UpperCAmelCase =1
__UpperCAmelCase =SwinvaForMaskedImageModeling(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
__UpperCAmelCase =self.type_sequence_label_size
__UpperCAmelCase =SwinvaForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : List[str] ) -> Tuple:
__UpperCAmelCase =self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs
__UpperCAmelCase ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _A ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowerCamelCase : Tuple = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Dict = False
lowerCamelCase : Tuple = False
lowerCamelCase : List[str] = False
lowerCamelCase : Tuple = False
def _a ( self : str ) -> str:
__UpperCAmelCase =SwinvaModelTester(self )
__UpperCAmelCase =ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , embed_dim=37 )
def _a ( self : List[Any] ) -> Optional[int]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : str ) -> str:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def _a ( self : Tuple ) -> Tuple:
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def _a ( self : Optional[Any] ) -> int:
pass
def _a ( self : Tuple ) -> int:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def _a ( self : str ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase =[*signature.parameters.keys()]
__UpperCAmelCase =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =True
for model_class in self.all_model_classes:
__UpperCAmelCase =True
__UpperCAmelCase =False
__UpperCAmelCase =True
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
__UpperCAmelCase =len(self.model_tester.depths )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase =True
__UpperCAmelCase =config.window_size**2
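# Swin attention is computed inside local windows, so each attention map is (window_size**2) x (window_size**2)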
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
__UpperCAmelCase =len(__SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__UpperCAmelCase =True
__UpperCAmelCase =True
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
if hasattr(self.model_tester , """num_hidden_states_types""" ):
__UpperCAmelCase =self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__UpperCAmelCase =2
self.assertEqual(out_len + added_hidden_states , len(__SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> int:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.hidden_states
__UpperCAmelCase =getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# Swinv2 has a different seq_length
__UpperCAmelCase =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
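# one hidden-state token per non-overlapping patch of the input image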
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__UpperCAmelCase =outputs.reshaped_hidden_states
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =reshaped_hidden_states[0].shape
__UpperCAmelCase =(
reshaped_hidden_states[0].view(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _a ( self : str ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _a ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =3
__UpperCAmelCase =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__UpperCAmelCase =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__UpperCAmelCase =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
def _a ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Dict:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def _a ( self : int ) -> Dict:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase =SwinvaModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def _a ( self : Dict ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =_config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(config=__SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class _A ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Tuple ) -> Dict:
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def _a ( self : int ) -> Optional[int]:
__UpperCAmelCase =SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.default_image_processor
__UpperCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__UpperCAmelCase =image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
__UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE )
# verify the logits
__UpperCAmelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 68 | 1 |
def lowercase__ ( A_: str , A_: str ) -> Optional[Any]:
"""simple docstring"""
assert x is not None
assert y is not None
__UpperCAmelCase =len(A_ )
__UpperCAmelCase =len(A_ )
# declaring the array for storing the dp values
__UpperCAmelCase =[[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
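# l[i][j] holds the length of the longest common subsequence of x[:i] and y[:j]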
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
__UpperCAmelCase =1 if x[i - 1] == y[j - 1] else 0
__UpperCAmelCase =max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
__UpperCAmelCase =""""""
__UpperCAmelCase , __UpperCAmelCase =m, n
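# walk the table backwards from (m, n) to reconstruct one optimal subsequence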
while i > 0 and j > 0:
__UpperCAmelCase =1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
__UpperCAmelCase =x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
__A = "AGGTAB"
__A = "GXTXAYB"
__A = 4
__A = "GTAB"
__A , __A = longest_common_subsequence(a, b)
print("len =", ln, ", sub-sequence =", subseq)
import doctest
doctest.testmod()
| 68 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A = logging.get_logger(__name__)
__A = {"vocab_file": "spiece.model"}
__A = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
__A = {
"AI-Sweden/gpt-sw3-126m": 20_48,
"AI-Sweden/gpt-sw3-350m": 20_48,
"AI-Sweden/gpt-sw3-1.6b": 20_48,
"AI-Sweden/gpt-sw3-6.7b": 20_48,
"AI-Sweden/gpt-sw3-20b": 20_48,
}
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : int = VOCAB_FILES_NAMES
lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = ['input_ids', 'attention_mask']
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None:
__UpperCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
__UpperCAmelCase =kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" you are testing the model, this can safely be ignored""" )
__UpperCAmelCase ="""None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__UpperCAmelCase ="""<|endoftext|>""" if eos_token is None else eos_token
__UpperCAmelCase ="""<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__UpperCAmelCase =unk_token if pad_token is None else pad_token
__UpperCAmelCase =eos_token if bos_token is None else bos_token
else:
__UpperCAmelCase ="""<pad>""" if pad_token is None else pad_token
__UpperCAmelCase ="""<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
__UpperCAmelCase =do_lower_case
__UpperCAmelCase =remove_space
__UpperCAmelCase =keep_accents
__UpperCAmelCase =vocab_file
__UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
# Used for whitespace normalization in input texts
# fmt: off
__UpperCAmelCase ={""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
__UpperCAmelCase =re.compile(
f'''[{"".join(map(__SCREAMING_SNAKE_CASE , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]''' )
def __getstate__( self : Any ) -> str:
__UpperCAmelCase =self.__dict__.copy()
__UpperCAmelCase =None
return state
def __setstate__( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__UpperCAmelCase ={}
__UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def _a ( self : Union[str, Any] ) -> int:
return len(self.sp_model )
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : str ) -> str:
__UpperCAmelCase =self.non_printing_characters_re.sub("""""" , __SCREAMING_SNAKE_CASE )
# Normalize whitespaces
__UpperCAmelCase ="""""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
__UpperCAmelCase =unicodedata.normalize("""NFC""" , __SCREAMING_SNAKE_CASE )
return text
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]:
__UpperCAmelCase =self.preprocess_text(__SCREAMING_SNAKE_CASE )
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str ) -> int:
return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> str:
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
@staticmethod
def _a ( __SCREAMING_SNAKE_CASE : str ) -> str:
return out_string
def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> str:
__UpperCAmelCase =[]
__UpperCAmelCase =""""""
__UpperCAmelCase =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
__UpperCAmelCase =True
__UpperCAmelCase =[]
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string
def _a ( self : Any ) -> Dict[str, int]:
__UpperCAmelCase ={self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase =os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , """wb""" ) as fi:
__UpperCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
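# fast encode: preprocess and run SentencePiece directly, skipping special-token handling; optionally returns a torch tensor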
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__UpperCAmelCase =self.preprocess_text(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.sp_model.encode(__SCREAMING_SNAKE_CASE )
else:
__UpperCAmelCase =[self.preprocess_text(__SCREAMING_SNAKE_CASE ) for t in text]
__UpperCAmelCase =self.sp_model.encode(__SCREAMING_SNAKE_CASE )
if return_tensors is True or return_tensors == "pt":
__UpperCAmelCase =torch.tensor(__SCREAMING_SNAKE_CASE )
return token_ids
def _a ( self : str , __SCREAMING_SNAKE_CASE : Union[int, List[int]] ) -> str:
return self.sp_model.decode(__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : "Conversation" ) -> List[int]:
__UpperCAmelCase =[f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
__UpperCAmelCase =(
f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(__SCREAMING_SNAKE_CASE ) + f'''{self.bos_token}Bot:'''
)
return self.encode(text=__SCREAMING_SNAKE_CASE )
| 68 | 1 |
def lowercase__ ( A_: Tuple ) -> str:
"""simple docstring"""
__UpperCAmelCase =len(A_ )
__UpperCAmelCase =sum(A_ )
__UpperCAmelCase =[[False for x in range(s + 1 )] for y in range(n + 1 )]
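# dp[i][j] is True when some subset of the first i elements sums to j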
for i in range(1 , n + 1 ):
__UpperCAmelCase =True
for i in range(1 , s + 1 ):
__UpperCAmelCase =False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
__UpperCAmelCase =dp[i][j - 1]
if arr[i - 1] <= j:
__UpperCAmelCase =dp[i][j] or dp[i - 1][j - arr[i - 1]]
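# the largest reachable sum j <= s/2 minimizes the partition difference s - 2*j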
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__UpperCAmelCase =s - 2 * j
break
return diff
| 68 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__A = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Path , __SCREAMING_SNAKE_CASE : Union[str, None] = None , __SCREAMING_SNAKE_CASE : Union[List[str], None] = None , __SCREAMING_SNAKE_CASE : Union[str, List[str], None] = None , __SCREAMING_SNAKE_CASE : bool = True , ) -> List[str]:
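# gather the files in the directory, keep those matching `identifier`, and drop `n_identifier` matches plus anything in `ignore_files`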
__UpperCAmelCase =[file for file in os.listdir(__SCREAMING_SNAKE_CASE ) if os.path.isfile(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )]
if identifier is not None:
__UpperCAmelCase =[file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for n_ in n_identifier:
__UpperCAmelCase =[file for file in files if n_ not in file]
else:
__UpperCAmelCase =[file for file in files if n_identifier not in file]
__UpperCAmelCase =ignore_files or []
ignore_files.append("""__init__.py""" )
__UpperCAmelCase =[file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , __SCREAMING_SNAKE_CASE )
if only_modules:
__UpperCAmelCase =file.split(""".""" )[0]
try:
__UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =doctest.DocTestSuite(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =unittest.TextTestRunner().run(__SCREAMING_SNAKE_CASE )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f'''{module_identifier} is not a module.''' )
else:
__UpperCAmelCase =doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def _a ( self : Optional[Any] ) -> List[str]:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase ="""modeling"""
__UpperCAmelCase =[
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE , ignore_files=__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase ="""tokenization"""
self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase ="""configuration"""
self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE )
def _a ( self : List[Any] ) -> Tuple:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase =["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(__SCREAMING_SNAKE_CASE , n_identifier=__SCREAMING_SNAKE_CASE )
def _a ( self : Any ) -> Tuple:
__UpperCAmelCase =Path("""docs/source""" )
__UpperCAmelCase =["""favicon.ico"""]
self.analyze_directory(__SCREAMING_SNAKE_CASE , ignore_files=__SCREAMING_SNAKE_CASE , only_modules=__SCREAMING_SNAKE_CASE )
| 68 | 1 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
__A = logging.get_logger(__name__)
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = 'vision-encoder-decoder'
lowerCamelCase : int = True
def __init__( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : int ) -> Dict:
super().__init__(**__SCREAMING_SNAKE_CASE )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f'''A configuraton of type {self.model_type} cannot be instantiated because '''
f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
__UpperCAmelCase =kwargs.pop("""encoder""" )
__UpperCAmelCase =encoder_config.pop("""model_type""" )
__UpperCAmelCase =kwargs.pop("""decoder""" )
__UpperCAmelCase =decoder_config.pop("""model_type""" )
__UpperCAmelCase =AutoConfig.for_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =AutoConfig.for_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =True
@classmethod
def _a ( cls : List[str] , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : PretrainedConfig , **__SCREAMING_SNAKE_CASE : str ) -> PretrainedConfig:
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
__UpperCAmelCase =True
__UpperCAmelCase =True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__SCREAMING_SNAKE_CASE )
def _a ( self : Union[str, Any] ) -> Tuple:
__UpperCAmelCase =copy.deepcopy(self.__dict__ )
__UpperCAmelCase =self.encoder.to_dict()
__UpperCAmelCase =self.decoder.to_dict()
__UpperCAmelCase =self.__class__.model_type
return output
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : List[Any] = version.parse('1.11' )
@property
def _a ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a ( self : Any ) -> float:
return 1e-4
@property
def _a ( self : str ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class _A ( UpperCamelCase ):
"""simple docstring"""
@property
def _a ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
__UpperCAmelCase =OrderedDict()
__UpperCAmelCase ={0: """batch""", 1: """past_decoder_sequence + sequence"""}
__UpperCAmelCase ={0: """batch""", 1: """past_decoder_sequence + sequence"""}
__UpperCAmelCase ={0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : "PreTrainedTokenizerBase" , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
import torch
__UpperCAmelCase =OrderedDict()
__UpperCAmelCase =super().generate_dummy_inputs(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
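# reuse the text model's dummy inputs, then swap in zero-filled encoder hidden states of the expected shape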
__UpperCAmelCase , __UpperCAmelCase =dummy_input["""input_ids"""].shape
__UpperCAmelCase =(batch, encoder_sequence, self._config.encoder_hidden_size)
__UpperCAmelCase =dummy_input.pop("""input_ids""" )
__UpperCAmelCase =dummy_input.pop("""attention_mask""" )
__UpperCAmelCase =torch.zeros(__SCREAMING_SNAKE_CASE )
return common_inputs
class _A ( UpperCamelCase ):
"""simple docstring"""
@property
def _a ( self : List[Any] ) -> None:
pass
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : PretrainedConfig ) -> OnnxConfig:
return VisionEncoderDecoderEncoderOnnxConfig(__SCREAMING_SNAKE_CASE )
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : str = "default" ) -> OnnxConfig:
__UpperCAmelCase =encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 68 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__A = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def lowercase__ ( A_: int , A_: Optional[Any] , A_: List[str]=None ) -> List[str]:
"""simple docstring"""
if rng is None:
__UpperCAmelCase =random.Random()
__UpperCAmelCase =1
for dim in shape:
total_dims *= dim
__UpperCAmelCase =[]
for _ in range(A_ ):
values.append(rng.randint(0 , vocab_size - 1 ) )
__UpperCAmelCase =np.array(A_ , dtype=jnp.intaa ).reshape(A_ )
return output
def lowercase__ ( A_: List[str] , A_: List[str]=None ) -> Any:
"""simple docstring"""
__UpperCAmelCase =ids_tensor(A_ , vocab_size=2 , rng=A_ )
# make sure that at least one token is attended to for each batch
__UpperCAmelCase =1
return attn_mask
@require_flax
class _A :
"""simple docstring"""
lowerCamelCase : Optional[Any] = None
lowerCamelCase : int = ()
def _a ( self : str ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
__UpperCAmelCase =2
__UpperCAmelCase =inputs["""input_ids"""].shape[-1] // 2
__UpperCAmelCase =inputs["""input_ids"""][:max_batch_size, :sequence_length]
__UpperCAmelCase =jnp.ones_like(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
__UpperCAmelCase =input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
__UpperCAmelCase =config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _a ( self : Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
__UpperCAmelCase =0
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =pt_model_class(__SCREAMING_SNAKE_CASE ).eval()
__UpperCAmelCase =load_flax_weights_in_pytorch_model(__SCREAMING_SNAKE_CASE , flax_model.params )
__UpperCAmelCase =flax_model.generate(__SCREAMING_SNAKE_CASE ).sequences
__UpperCAmelCase =pt_model.generate(torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
__UpperCAmelCase =flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def _a ( self : Optional[int] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Union[str, Any] ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =True
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : List[Any] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
__UpperCAmelCase =2
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Any ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
__UpperCAmelCase =2
__UpperCAmelCase =2
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def _a ( self : Union[str, Any] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =True
__UpperCAmelCase =max_length
__UpperCAmelCase =0.8
__UpperCAmelCase =10
__UpperCAmelCase =0.3
__UpperCAmelCase =1
__UpperCAmelCase =8
__UpperCAmelCase =9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =max_length
__UpperCAmelCase =1
__UpperCAmelCase =8
__UpperCAmelCase =9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Optional[int] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =max_length
__UpperCAmelCase =2
__UpperCAmelCase =1
__UpperCAmelCase =8
__UpperCAmelCase =9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : List[str] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase =attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase =False
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Dict ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase =attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase =True
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Dict ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase =attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase =2
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : int ) -> Any:
__UpperCAmelCase =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
__UpperCAmelCase =FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
__UpperCAmelCase ="""Hello world"""
__UpperCAmelCase =tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , """do_samples""" ):
model.generate(__SCREAMING_SNAKE_CASE , do_samples=__SCREAMING_SNAKE_CASE )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , """foo""" ):
__UpperCAmelCase ={"""foo""": """bar"""}
model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
| 68 | 1 |
from __future__ import annotations
from PIL import Image
# Define glider example
__A = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
__A = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def lowercase__ ( A_: list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
__UpperCAmelCase =[]
for i in range(len(A_ ) ):
__UpperCAmelCase =[]
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
__UpperCAmelCase =0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(A_ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(A_ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(A_ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
__UpperCAmelCase =cells[i][j] == 1
if (alive and 2 <= neighbour_count <= 3) or (
not alive and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(A_ )
return next_generation
def lowercase__ ( A_: list[list[int]] , A_: int ) -> list[Image.Image]:
"""simple docstring"""
__UpperCAmelCase =[]
for _ in range(A_ ):
# Create output image
__UpperCAmelCase =Image.new("""RGB""" , (len(cells[0] ), len(A_ )) )
__UpperCAmelCase =img.load()
# Save cells to image
for x in range(len(A_ ) ):
for y in range(len(cells[0] ) ):
__UpperCAmelCase =255 - cells[y][x] * 255
__UpperCAmelCase =(colour, colour, colour)
# Save image
images.append(A_ )
__UpperCAmelCase =new_generation(A_ )
return images
if __name__ == "__main__":
__A = generate_images(GLIDER, 16)
images[0].save("out.gif", save_all=True, append_images=images[1:])
| 68 |
from __future__ import annotations
from collections.abc import Iterator
class _A :
"""simple docstring"""
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> None:
__UpperCAmelCase =value
__UpperCAmelCase =None
__UpperCAmelCase =None
class _A :
"""simple docstring"""
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Node ) -> None:
__UpperCAmelCase =tree
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Node | None ) -> int:
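# recursively sum every value in the subtree rooted at the given node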
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : int ) -> Iterator[int]:
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 68 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A = logging.get_logger(__name__)
__A = {"tokenizer_file": "tokenizer.json"}
__A = {
"tokenizer_file": {
"bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
},
}
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
lowerCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = ['input_ids', 'attention_mask']
lowerCamelCase : Tuple = None
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Any="<unk>" , __SCREAMING_SNAKE_CASE : Dict="<s>" , __SCREAMING_SNAKE_CASE : Union[str, Any]="</s>" , __SCREAMING_SNAKE_CASE : Optional[Any]="<pad>" , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : int=False , **__SCREAMING_SNAKE_CASE : int , ) -> Any:
super().__init__(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__UpperCAmelCase =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
__UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop("""type""" ) )
__UpperCAmelCase =add_prefix_space
__UpperCAmelCase =pre_tok_class(**__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =add_prefix_space
def _a ( self : int , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> BatchEncoding:
__UpperCAmelCase =kwargs.get("""is_split_into_words""" , __SCREAMING_SNAKE_CASE )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
""" pretokenized inputs.""" )
return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[int] , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> BatchEncoding:
__UpperCAmelCase =kwargs.get("""is_split_into_words""" , __SCREAMING_SNAKE_CASE )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
""" pretokenized inputs.""" )
return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
__UpperCAmelCase =self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def _a ( self : int , __SCREAMING_SNAKE_CASE : "Conversation" ) -> List[int]:
__UpperCAmelCase =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) + [self.eos_token_id] )
if len(__SCREAMING_SNAKE_CASE ) > self.model_max_length:
__UpperCAmelCase =input_ids[-self.model_max_length :]
return input_ids
| 68 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def lowercase__ ( A_: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase =botoa.client("""iam""" )
__UpperCAmelCase ={
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=A_ , AssumeRolePolicyDocument=json.dumps(A_ , indent=2 ) )
__UpperCAmelCase ={
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=A_ , PolicyName=F'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(A_ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def lowercase__ ( A_: Dict ) -> Any:
"""simple docstring"""
__UpperCAmelCase =botoa.client("""iam""" )
return iam_client.get_role(RoleName=A_ )["Role"]["Arn"]
def lowercase__ ( ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase =_ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , A_ , )
__UpperCAmelCase =None
if credentials_configuration == 0:
__UpperCAmelCase =_ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
__UpperCAmelCase =aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
__UpperCAmelCase =_ask_field("""AWS Access Key ID: """ )
__UpperCAmelCase =aws_access_key_id
__UpperCAmelCase =_ask_field("""AWS Secret Access Key: """ )
__UpperCAmelCase =aws_secret_access_key
__UpperCAmelCase =_ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
__UpperCAmelCase =aws_region
__UpperCAmelCase =_ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , A_ , )
if role_management == 0:
__UpperCAmelCase =_ask_field("""Enter your IAM role name: """ )
else:
__UpperCAmelCase ="""accelerate_sagemaker_execution_role"""
print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(A_ )
__UpperCAmelCase =_ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =None
if is_custom_docker_image:
__UpperCAmelCase =_ask_field("""Enter your Docker image: """ , lambda A_ : str(A_ ).lower() )
__UpperCAmelCase =_ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =None
if is_sagemaker_inputs_enabled:
__UpperCAmelCase =_ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda A_ : str(A_ ).lower() , )
__UpperCAmelCase =_ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =None
if is_sagemaker_metrics_enabled:
__UpperCAmelCase =_ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda A_ : str(A_ ).lower() , )
__UpperCAmelCase =_ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
__UpperCAmelCase ={}
__UpperCAmelCase =_ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
if use_dynamo:
__UpperCAmelCase ="""dynamo_"""
__UpperCAmelCase =_ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
__UpperCAmelCase =_ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
if use_custom_options:
__UpperCAmelCase =_ask_options(
"""Which mode do you want to use?""" , A_ , lambda A_ : TORCH_DYNAMO_MODES[int(A_ )] , default="""default""" , )
__UpperCAmelCase =_ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase =_ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=A_ , error_message="""Please enter yes or no.""" , )
__UpperCAmelCase ="""Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
__UpperCAmelCase =_ask_options(
A_ , A_ , lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__UpperCAmelCase =_ask_field(A_ , lambda A_ : str(A_ ).lower() , default="""ml.p3.2xlarge""" )
__UpperCAmelCase =1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__UpperCAmelCase =_ask_field(
"""How many machines do you want use? [1]: """ , A_ , default=1 , )
__UpperCAmelCase =_ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=A_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=A_ , use_cpu=A_ , dynamo_config=A_ , eca_instance_type=A_ , profile=A_ , region=A_ , iam_role_name=A_ , mixed_precision=A_ , num_machines=A_ , sagemaker_inputs_file=A_ , sagemaker_metrics_file=A_ , )
| 68 | 1 |