""" Testing suite for the PyTorch Swinv2 model. """

import collections
import inspect
import unittest

from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class Swinv2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return Swinv2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = Swinv2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # each patch-merging stage shrinks the sequence 4x and doubles the hidden dim
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = Swinv2ForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = Swinv2ForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = Swinv2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = Swinv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )


@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
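

# Illustrative note (not part of the original test file): with the tiny tester
# config above (image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1]),
# the shape math in create_and_check_model works out to
#
#   num_patches      = (32 // 2) ** 2        = 256
#   expected_seq_len = 256 // 4 ** (3 - 1)   = 16   # two patch-merging stages
#   expected_dim     = 16 * 2 ** (3 - 1)     = 64   # hidden dim doubles per merge
#
# so `last_hidden_state` is checked against shape (batch_size, 16, 64).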
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_x,
                    self.target.pos_y,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # each search aims at the frontier node of the opposite search
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bd_a_star = BidirectionalAStar(init, goal)
    bd_path = bd_a_star.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
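

# Illustrative note (not part of the original module): the two heuristics
# selected by HEURISTIC disagree for any diagonal offset. For a node at (0, 0)
# with its goal at (3, 4):
#
#   manhattan = abs(3) + abs(4)   = 7
#   euclidean = sqrt(3**2 + 4**2) = 5.0
#
# Manhattan is the tighter admissible bound here because `delta` only allows
# 4-directional moves, so no path can be shorter than |dx| + |dy|.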
import torch

from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
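

# Minimal usage sketch mirroring the loops exercised above (only the public
# EulerDiscreteScheduler API tested in this file is used; `unet` stands in for
# any noise-prediction model and is hypothetical):
#
#   scheduler = EulerDiscreteScheduler(num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02)
#   scheduler.set_timesteps(10)
#   sample = initial_noise * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample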
""" XLM configuration"""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
    "xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
    "xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
    "xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
    "xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
    "xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
    "xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
    "xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
    "xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
    "xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}


class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Constructs XLMConfig."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)


class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
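

# Usage sketch (not part of the original module): `attribute_map` lets generic
# code read XLM's nonstandard field names through the common config interface.
#
#   config = XLMConfig()      # defaults defined above
#   config.hidden_size        # -> 2048, resolved to `emb_dim`
#   config.num_hidden_layers  # -> 12, resolved to `n_layers`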
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.

In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]

On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
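

# Usage sketch outside the test harness (mirrors the calls asserted above):
#
#   from transformers import load_tool
#
#   tool = load_tool("text-question-answering")
#   tool.setup()
#   answer = tool(text=TEXT, question="What did Hugging Face do in April 2021?")
#   # -> "launched the BigScience Research Workshop"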
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
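

# Round-trip sketch of the serialization under test (uses the same public and
# private SplitDict API the tests above rely on):
#
#   splits = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
#   yaml_list = splits._to_yaml_list()  # list of plain dicts; dataset_name is dropped
#   assert SplitDict._from_yaml_list(yaml_list)["train"].num_examples == 42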
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class KarrasVeOutput(BaseOutput):
    """
    Output class for the scheduler's step function output.

    Args:
        prev_sample: computed sample (x_{t-1}) of the previous timestep; should be used as the next model input.
        derivative: derivative of the predicted original image sample (x_0).
        pred_original_sample: the predicted denoised sample (x_0) based on the model output from the current timestep.
    """

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
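

# Sampling-loop sketch assembled only from the methods defined above (`unet` is
# a hypothetical noise model; the input/output scaling applied by the full
# KarrasVePipeline is omitted for brevity):
#
#   scheduler.set_timesteps(num_inference_steps)
#   sample = randn_tensor(shape, generator=generator) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       sigma = scheduler.schedule[t]
#       sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma, generator=generator)
#       model_output = unet(sample_hat, sigma_hat)
#       step_output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
#       sample = step_output.prev_sample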
import itertools
import random
import unittest

import numpy as np

from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
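

# What `_check_zero_mean_unit_variance` asserts, written out (illustrative,
# local names only): with do_normalize=True the extractor applies per-utterance
#
#   normed = (x - x.mean()) / sqrt(x.var() + 1e-7)
#
# so over the non-padded region the mean is ~0 and the variance is ~1, which is
# exactly what the padding tests above probe slice by slice.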
import argparse
import logging
import os
import time
import timeit

import datasets
import numpy as np
import pycuda.autoinit  # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions

import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate


TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()

# Required parameters
parser.add_argument(
    "--onnx_model_path",
    default=None,
    type=str,
    required=True,
    help="Path to ONNX model: ",
)
parser.add_argument(
    "--output_dir",
    default=None,
    type=str,
    required=True,
    help="The output directory where the model checkpoints and predictions will be written.",
)

# Other parameters
parser.add_argument(
    "--tokenizer_name",
    default="",
    type=str,
    required=True,
    help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
    "--version_2_with_negative",
    action="store_true",
    help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
    "--null_score_diff_threshold",
    type=float,
    default=0.0,
    help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
    "--max_seq_length",
    default=384,
    type=int,
    help=(
        "The maximum total input sequence length after WordPiece tokenization. Sequences "
        "longer than this will be truncated, and sequences shorter than this will be padded."
    ),
)
parser.add_argument(
    "--doc_stride",
    default=128,
    type=int,
    help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
    "--n_best_size",
    default=20,
    type=int,
    help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
    "--max_answer_length",
    default=30,
    type=int,
    help=(
        "The maximum length of an answer that can be generated. This is needed because the start "
        "and end predictions are not conditioned on one another."
    ),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
    "--dataset_name",
    type=str,
    default=None,
    required=True,
    help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
    "--dataset_config_name",
    type=str,
    default=None,
    help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
    "--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
    "--fp16",
    action="store_true",
    help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
    "--int8",
    action="store_true",
    help="Whether to use INT8",
)

args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"

# import ONNX file
if not os.path.exists("temp_engine"):
    os.makedirs("temp_engine")

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, "rb") as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())


def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
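

# Note on the buffer arguments above: `d_inputs` holds the three device-side
# input bindings (input_ids, attention_mask, token_type_ids) in engine binding
# order, and the two pinned host buffers receive the start/end logits copied
# back after execute_async. They are allocated from the engine's binding shapes
# once the engine has been deserialized below.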
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__A = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__A = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
__A = raw_datasets["validation"].column_names
__A = "question" if "question" in column_names else column_names[0]
__A = "context" if "context" in column_names else column_names[1]
__A = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__A = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
__A = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Strip leading whitespace from the questions.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans; this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping entries that are not part of the context, so it is easy to determine
        # whether a token position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
# Post-processing: we match the start and end logits back to answers in the original context.
def post_processing_function(examples, features, predictions, stage="eval"):
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)


metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # Set up TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffers (start logits and end logits).
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
    # Evaluation
    logger.info("***** Running Evaluation *****")
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # Necessary to pad predictions and labels so they can be gathered across processes.
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    eval_time = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", eval_time, eval_time / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}")
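

# `model_infer` is defined earlier in the full script; the excerpt above only calls it. A minimal
# sketch of such a helper is given below, assuming standard pycuda/tensorrt usage. The binding
# layout (three int32 inputs followed by the two logit outputs) mirrors the buffers allocated
# above, but it is an assumption, not the exact code from the original file.
def model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(batch["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(batch["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(batch["token_type_ids"], dtype=np.int32)

    # Copy inputs host -> device.
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)

    start = timeit.default_timer()
    # Run inference; bindings are the device pointers in engine binding order.
    context.execute_async_v2(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)],
        stream_handle=stream.handle,
    )
    # Copy outputs device -> host and wait for the stream to finish.
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    stream.synchronize()
    infer_time = timeit.default_timer() - start

    return (h_output0, h_output1), infer_time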
import importlib

import torch
import yaml

from omegaconf import OmegaConf
from taming.models.vqgan import VQModel


def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model
def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
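

# Minimal usage sketch for the helpers above, assuming the default checkpoint paths exist; the
# input tensor shape is illustrative, not taken from the original file.
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    vqgan = load_vqgan(device)
    dummy_batch = torch.randn(1, 3, 256, 256, device=device)  # (batch, channels, height, width)
    reconstruction = reconstruct_with_vqgan(dummy_batch, vqgan)
    print(f"reconstruction shape: {tuple(reconstruction.shape)}")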
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
import argparse

import torch

from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
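

# Quick sanity checks for the wildcard rules above (the key names here are illustrative, not
# entries from a real checkpoint):
assert should_ignore("encoder.layers.3.norm_k.weight", ["encoder.layers.*.norm_k.weight"])  # ".*." pattern
assert should_ignore("text_encoder_prenet.embed.weight", ["text_encoder_prenet.*"])  # trailing ".*" prefix match
assert not should_ignore("decoder.layers.0.fc1.weight", ["encoder.proj"])  # plain substring, no match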
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_speecht5_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
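
# Example invocation (all file names below are illustrative):
#   python convert_speecht5_checkpoint.py --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_hf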
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"

        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]

        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
_lowercase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowercase = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
_lowercase = {
"""google/electra-small-generator""": 5_12,
"""google/electra-base-generator""": 5_12,
"""google/electra-large-generator""": 5_12,
"""google/electra-small-discriminator""": 5_12,
"""google/electra-base-discriminator""": 5_12,
"""google/electra-large-discriminator""": 5_12,
}
_lowercase = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
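

# Usage sketch (downloads tokenizer files from the hub, so network access is assumed):
#   tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   input_ids = tokenizer("hello world")["input_ids"]  # [CLS] hello world [SEP]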
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret
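

# Worked example: the prime partitions of 10 are 7+3, 5+5, 5+3+2, 3+3+2+2 and 2+2+2+2+2, whose
# products are 21, 25, 30, 36 and 32, so len(partition(10)) == 5.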
def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
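
# For the 13-digit window used below, the expected result for this 1000-digit number is
# 23514624000 (Project Euler problem 8).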
def solution(n: str = N) -> int:
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
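
# With the `_LazyModule` above, `import transformers.models.wav2vec2` is cheap: the submodules
# listed in `_import_structure` (and their heavy torch/tf/flax dependencies) are only imported
# when one of their attributes is first accessed.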
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent


if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]

    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
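
# Example (the script name is illustrative):
#   python crawl_google_results.py "hugging face transformers"
# opens up to five result links from the first Google results page in the default browser.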
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width for an input image, mirroring the resizing logic of
        BridgeTowerImageProcessor when do_resize is True.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
# Initialize image processor
snake_case__ :Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ :str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCamelCase ,torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase ,torch.Tensor )
# Test not batched input
snake_case__ :List[Any] = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
snake_case__ , snake_case__ :List[str] = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
snake_case__ :str = image_processing(UpperCamelCase ,return_tensors="pt" ).pixel_values
snake_case__ , snake_case__ :List[str] = self.image_processor_tester.get_expected_values(UpperCamelCase ,batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,) | 57 |
from __future__ import annotations


def mean(nums: list) -> float:
    """
    Return the arithmetic mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
def count_divisors(n):
    n_divisors = 1
    i = 2

    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1

    if n > 1:
        n_divisors *= 2

    return n_divisors
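

# Worked example: 28 = 2**2 * 7, so count_divisors(28) == (2 + 1) * (1 + 1) == 6
# (divisors 1, 2, 4, 7, 14, 28). 28 is also the first triangle number with more than 5 divisors.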
def solution():
    t_num = 1
    i = 1

    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break

    return t_num


if __name__ == "__main__":
    print(solution())
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : Any ):
with open(lowerCAmelCase_ , """r""" , encoding="""utf-8""" ) as f:
__lowercase : List[str] = [json.loads(lowerCAmelCase_ ) for line in f.read().splitlines() if (len(lowerCAmelCase_ ) > 0 and not line.isspace())]
assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ )
__lowercase : Tuple = {c: dataset[c] for c in dataset.column_names}
__lowercase : List[str] = refs
return Dataset.from_dict(lowerCAmelCase_ )
def snake_case_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowercase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowercase , __lowercase , __lowercase : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowercase , __lowercase , __lowercase : Optional[int] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__lowercase : int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowercase : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCAmelCase_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__lowercase : Tuple = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
__lowercase : str = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[:{data_args.validation_split_percentage}%]" , )
__lowercase : List[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[{data_args.validation_split_percentage}%:]" , )
else:
__lowercase : Optional[int] = {}
if data_args.train_file is not None:
__lowercase : List[Any] = data_args.train_file
if data_args.validation_file is not None:
__lowercase : Optional[Any] = data_args.validation_file
__lowercase : Dict = data_args.train_file.split(""".""" )[-1]
if extension == "txt":
__lowercase : Tuple = """text"""
__lowercase : str = load_dataset(lowerCAmelCase_ , data_files=lowerCAmelCase_ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase : str = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
__lowercase : List[str] = AutoConfig.from_pretrained(model_args.config_name , **lowerCAmelCase_ )
elif model_args.model_name_or_path:
__lowercase : int = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_ )
else:
__lowercase : List[str] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
__lowercase : List[str] = {
"""cache_dir""": model_args.cache_dir,
"""use_fast""": model_args.use_fast_tokenizer,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
__lowercase : Any = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowerCAmelCase_ )
elif model_args.model_name_or_path:
__lowercase : Optional[int] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_ )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
if model_args.model_name_or_path:
__lowercase : int = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
__lowercase : List[str] = AutoModelForMaskedLM.from_config(lowerCAmelCase_ )
model.resize_token_embeddings(len(lowerCAmelCase_ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
__lowercase : Optional[Any] = datasets["""train"""].column_names
else:
__lowercase : Dict = datasets["""validation"""].column_names
__lowercase : Tuple = """text""" if """text""" in column_names else column_names[0]
__lowercase : List[str] = """max_length""" if data_args.pad_to_max_length else False
def tokenize_function(lowerCAmelCase_ : Optional[int] ):
# Remove empty lines
__lowercase : Dict = [line for line in examples["""text"""] if len(lowerCAmelCase_ ) > 0 and not line.isspace()]
return tokenizer(examples["""text"""] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=data_args.max_seq_length )
__lowercase : List[str] = datasets.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
__lowercase : str = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
__lowercase : Dict = add_chinese_references(
tokenized_datasets["""validation"""] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
__lowercase : Union[str, Any] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
__lowercase : Dict = False
# Data collator
# This one will take care of randomly masking the tokens.
__lowercase : Optional[Any] = DataCollatorForWholeWordMask(tokenizer=lowerCAmelCase_ , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase : Any = Trainer(
model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__lowercase : Any = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
__lowercase : List[str] = model_args.model_name_or_path
else:
__lowercase : List[Any] = None
__lowercase : Any = trainer.train(resume_from_checkpoint=lowerCAmelCase_ )
trainer.save_model() # Saves the tokenizer too for easy upload
__lowercase : Optional[Any] = os.path.join(training_args.output_dir , """train_results.txt""" )
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , """w""" ) as writer:
logger.info("""***** Train results *****""" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# Evaluation
__lowercase : Dict = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowercase : List[Any] = trainer.evaluate()
__lowercase : Any = math.exp(eval_output["""eval_loss"""] )
__lowercase : Any = perplexity
__lowercase : Optional[Any] = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" )
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in sorted(results.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
return results
def snake_case_ ( lowerCAmelCase_ : List[str] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 149 | 1 |
def sum_of_digits(n: int) -> int:
    """Iteratively sum the digits of abs(n)."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int) -> int:
    """Recursively sum the digits of abs(n)."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)
def sum_of_digits_compact(n: int) -> int:
    """Sum the digits of abs(n) via a string round-trip."""
    return sum(int(c) for c in str(abs(n)))
def benchmark() -> None:
    """Time each implementation on a few sample values."""
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")
    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
| 513 |
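# Editor's check (not in the original file): the three digit-sum variants above
# should agree; the sample values are illustrative.
for n in (0, 7, 12345, -9876):
    assert sum_of_digits(n) == sum_of_digits_recursion(n) == sum_of_digits_compact(n)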
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(thresholding=True, prediction_type=prediction_type, sample_max_value=threshold)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            self.assertEqual(prev_t.item(), expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 513 | 1 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n
def solution(n: int = 10000) -> int:
    """Return the sum of all amicable numbers below n (Project Euler 21)."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 683 |
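# Editor's check for sum_of_divisors above (not in the original file): 220 and
# 284 are the classic amicable pair, so each maps to the other.
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220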
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"))
        return model
    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10)
        return model
    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"))
        unet = UNet2DModel(sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"))
        return vqvae, unet
    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0])
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0
        mel = Mel(x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0])
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 683 | 1 |
from __future__ import annotations
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458
# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    """Return v/c after validating the velocity."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c
def gamma(velocity: float) -> float:
    """Return the Lorentz factor 1 / sqrt(1 - beta^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)
def transformation_matrix(velocity: float) -> np.ndarray:
    """Return the 4x4 Lorentz boost matrix along the x axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )
def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    """Apply a Lorentz boost to an event four-vector (symbolic by default)."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")
    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
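# Editor's sketch (not in the original file): at half the speed of light the
# helpers above give beta = 0.5 and gamma = 1/sqrt(0.75) ~= 1.1547.
if __name__ == "__main__":
    v = 0.5 * c
    print(f"beta = {beta(v):.4f}, gamma = {gamma(v):.4f}")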
| 708 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]
if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 286 | 0 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    """Wraps a FLAVA image processor and a BERT tokenizer into a single processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_image_mask: Optional[bool] = None, return_codebook_pixels: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 107 |
class CircularQueue:
    """Fixed-capacity FIFO queue backed by a list, with wrap-around indices."""
    def __init__(self, n):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0
    def __len__(self) -> int:
        return self.size
    def is_empty(self) -> bool:
        return self.size == 0
    def first(self):
        return False if self.is_empty() else self.array[self.front]
    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self
    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
| 565 | 0 |
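# Editor's usage sketch for the circular queue above (capacity and values are
# illustrative assumptions).
q = CircularQueue(3)
q.enqueue(1).enqueue(2)
assert len(q) == 2 and q.first() == 1
assert q.dequeue() == 1 and q.dequeue() == 2 and q.is_empty()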
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples
    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )
        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )
    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 710 |
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
logger = logging.get_logger(__name__)
class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 318 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig):
    """Configuration for X-MOD models (XLM-R variants with language-specific adapters)."""
    model_type = "xmod"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 508 |
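# Editor's sketch (assumes the transformers package is installed; the sizes are
# illustrative assumptions): instantiating a tiny config from the class above.
tiny_config = XmodConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2, num_attention_heads=2)
assert tiny_config.model_type == "xmod" and tiny_config.default_language is None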
def factorial(num: int) -> int:
    """Return num!."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact
def split_and_add(number: int) -> int:
    """Split number into digits and add them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits
def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler 20)."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
| 508 | 1 |
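# Editor's check for the helpers above: 10! = 3628800, whose digits sum to 27.
assert solution(10) == 27
assert split_and_add(3628800) == 27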
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''' )
_SCREAMING_SNAKE_CASE =tf.convert_to_tensor(
[[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
_SCREAMING_SNAKE_CASE =model(__lowerCAmelCase )['''last_hidden_state''']
_SCREAMING_SNAKE_CASE =tf.TensorShape((1, 1_0, 7_6_8) )
self.assertEqual(output.shape , __lowerCAmelCase )
# compare the actual values for a slice.
_SCREAMING_SNAKE_CASE =tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 710 |
"""simple docstring"""
def _lowerCAmelCase(a : list ) -> int:
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''' )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
_SCREAMING_SNAKE_CASE =grid[0]
for row_n in range(1 , len(a ) ):
_SCREAMING_SNAKE_CASE =grid[row_n]
_SCREAMING_SNAKE_CASE =fill_row(a , a )
_SCREAMING_SNAKE_CASE =grid[row_n]
return grid[-1][-1]
def _lowerCAmelCase(a : list , a : list ) -> list:
current_row[0] += row_above[0]
for cell_n in range(1 , len(a ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
| 165 | 0 |
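# Editor's check on the classic 3x3 example: the cheapest path through
# [[1, 3, 1], [1, 5, 1], [4, 2, 1]] costs 1 + 3 + 1 + 1 + 1 = 7.
assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7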
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
| 75 |
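# Editor's usage note for the CLI above (invocation follows standard
# python-fire behavior; the file names are illustrative assumptions):
#   python rouge_cli.py predictions.txt references.txt --save_path metrics.json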
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val
    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"
    def __lt__(self, other):
        return self.val < other.val
class MinHeap:
    """Binary min-heap over Node objects, supporting O(log n) decrease_key by node."""
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)
    def __getitem__(self, key):
        return self.get_value(key)
    def get_parent_idx(self, idx):
        return (idx - 1) // 2
    def get_left_child_idx(self, idx):
        return idx * 2 + 1
    def get_right_child_idx(self, idx):
        return idx * 2 + 2
    def get_value(self, key):
        return self.heap_dict[key]
    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                # keep the element -> index map in sync with the swap
                self.idx_of_element[array[idx]], self.idx_of_element[array[smallest]] = idx, smallest
                idx = smallest
            else:
                break
    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = p, idx
            idx = p
            p = self.get_parent_idx(idx)
    def peek(self):
        return self.heap[0]
    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = 0, len(self.heap) - 1
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x
    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)
    def is_empty(self):
        return len(self.heap) == 0
    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
    print(i)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 75 | 1 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
    import timm
if is_torch_available():
    from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    """Exposes timm feature-extractor models through the transformers backbone API."""
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
        self._backbone = timm.create_model(config.backbone, pretrained=pretrained, features_only=config.features_only, in_chans=config.num_channels, out_indices=out_indices, **kwargs)
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig
        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)
    def _init_weights(self, module):
        pass
    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
| 511 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str, ...] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int, ...] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn,
            norm_num_groups=norm_num_groups, double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn,
            norm_num_groups=norm_num_groups, norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
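
# Usage sketch (added; not part of the original file): a round trip through the model
# above with its default config. Shapes are illustrative assumptions.
if __name__ == "__main__":
    model = VQModel()  # defaults: 3-channel images, a single 64-channel block, no downsampling
    sample = torch.randn(1, 3, 32, 32)
    latents = model.encode(sample).latents         # continuous latents before quantization
    reconstruction = model.decode(latents).sample  # quantize -> post_quant_conv -> decoder
    print(reconstruction.shape)  # expected: torch.Size([1, 3, 32, 32])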
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
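
# Usage sketch (added; not part of the original test file): running the processor
# directly, mirroring the 18x18 defaults above.
if __name__ == "__main__":
    if is_torch_available() and is_vision_available():
        processor = DPTImageProcessor(size={"height": 18, "width": 18})
        dummy = Image.new("RGB", (30, 30))
        pixel_values = processor(images=dummy, return_tensors="pt").pixel_values
        print(pixel_values.shape)  # expected: torch.Size([1, 3, 18, 18])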
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {"""configuration_opt""": ["""OPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OPTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
"""OPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OPTForCausalLM""",
"""OPTModel""",
"""OPTPreTrainedModel""",
"""OPTForSequenceClassification""",
"""OPTForQuestionAnswering""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""TFOPTForCausalLM""", """TFOPTModel""", """TFOPTPreTrainedModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
"""FlaxOPTForCausalLM""",
"""FlaxOPTModel""",
"""FlaxOPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
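
# Illustration (added; not part of the file): the _LazyModule indirection above means
# the framework-specific submodules are imported only when a symbol is first accessed.
# A minimal, library-independent sketch of the same idea:
import importlib


def lazy_get(module_name: str, attr: str):
    """Import `module_name` only at lookup time and return `attr` from it."""
    return getattr(importlib.import_module(module_name), attr)


# e.g. lazy_get("math", "pi") triggers the import of `math` only when called.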
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative when `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""
    # Random initial weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
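
# Example run (added, illustrative): with enough propagations the returned value
# converges toward `expected`; the exact output depends on the random initial weight,
# so the check is skipped by default, mirroring the upstream doctest convention.
#
#   >>> result = forward_propagation(32, 450_000)  # doctest: +SKIP
#   >>> 31 < result < 33  # doctest: +SKIP
#   True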
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case =["""ChineseCLIPFeatureExtractor"""]
__snake_case =["""ChineseCLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False


logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"


def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping patches from a channels-first image tensor."""
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> Image.Image:
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image: np.ndarray, header: str, **kwargs) -> np.ndarray:
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result
    def normalize(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean and std across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")

            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
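
# Usage sketch (added; not in the original file; requires torch at runtime): flattening
# one image into a padded patch sequence. The sizes below are assumptions for the example.
if __name__ == "__main__":
    from PIL import Image as PILImage

    processor = Pix2StructImageProcessor(max_patches=256)
    dummy = PILImage.new("RGB", (64, 48))
    encoded = processor(images=dummy, return_tensors="np")
    # Each row is [row_id, col_id, flattened 16x16x3 patch] -> 2 + 768 = 770 features.
    print(encoded["flattened_patches"].shape)  # expected: (1, 256, 770)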
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """Wraps a SpeechT5 feature extractor and a SpeechT5 tokenizer into a single processor."""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily swap feature_size so the feature extractor pads mel
                # spectrogram targets with num_mel_bins features per frame.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
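
# Usage sketch (added; downloads a checkpoint whose name is an assumption for illustration):
if __name__ == "__main__":
    from transformers import SpeechT5Processor

    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    inputs = processor(text="Hello world", return_tensors="pt")
    print(inputs["input_ids"].shape)  # tokenized text ready for the TTS encoder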
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a level via consecutive coin flips with success probability `p`."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        """Return (node with the key, or None) plus the vector of nodes to update."""
        update_vector = []

        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT) -> VT | None:
        node, update_vector = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main():
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
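
# Quick interactive example (added; mirrors main() above):
#
#   >>> sl = SkipList()
#   >>> sl.insert("a", 1)
#   >>> sl.insert("b", 2)
#   >>> sl.find("a")
#   1
#   >>> list(sl)  # iteration yields keys in sorted order
#   ['a', 'b']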
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=2 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=10 , _UpperCAmelCase=3 , _UpperCAmelCase=32 * 8 , _UpperCAmelCase=32 * 8 , _UpperCAmelCase=4 , _UpperCAmelCase=64 , ) -> List[Any]:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = is_training
UpperCamelCase_ = use_auxiliary_loss
UpperCamelCase_ = num_queries
UpperCamelCase_ = num_channels
UpperCamelCase_ = min_size
UpperCamelCase_ = max_size
UpperCamelCase_ = num_labels
UpperCamelCase_ = hidden_dim
UpperCamelCase_ = hidden_dim
def _UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_UpperCAmelCase )
UpperCamelCase_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_UpperCAmelCase )
UpperCamelCase_ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_UpperCAmelCase ) > 0.5
).float()
UpperCamelCase_ = (torch.rand((self.batch_size, self.num_labels) , device=_UpperCAmelCase ) > 0.5).long()
UpperCamelCase_ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
UpperCamelCase_ = self.num_queries
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = [1, 1, 1, 1]
UpperCamelCase_ = self.num_channels
UpperCamelCase_ = 64
UpperCamelCase_ = 128
UpperCamelCase_ = self.hidden_dim
UpperCamelCase_ = self.hidden_dim
UpperCamelCase_ = self.hidden_dim
return config
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
UpperCamelCase_ = output.encoder_hidden_states
UpperCamelCase_ = output.pixel_decoder_hidden_states
UpperCamelCase_ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCAmelCase ) , config.decoder_layers )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Any:
with torch.no_grad():
UpperCamelCase_ = MaskaFormerModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase )
UpperCamelCase_ = model(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
UpperCamelCase_ = MaskaFormerForUniversalSegmentation(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
def comm_check_on_output(_UpperCAmelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCamelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase )
UpperCamelCase_ = model(_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
UpperCamelCase_ = model(
pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)
def _UpperCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_UpperCAmelCase )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _UpperCAmelCase ( self ) -> int:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> str:
pass
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
UpperCamelCase_ = MaskaFormerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = (self.model_tester.min_size,) * 2
UpperCamelCase_ = {
'pixel_values': torch.randn((2, 3, *size) , device=_UpperCAmelCase ),
'mask_labels': torch.randn((2, 10, *size) , device=_UpperCAmelCase ),
'class_labels': torch.zeros(2 , 10 , device=_UpperCAmelCase ).long(),
}
UpperCamelCase_ = self.model_tester.get_config()
UpperCamelCase_ = MaskaFormerForUniversalSegmentation(_UpperCAmelCase ).to(_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase , output_attentions=_UpperCAmelCase )
self.assertTrue(outputs.attentions is not None )
def _UpperCAmelCase ( self ) -> List[Any]:
if not self.model_tester.is_training:
return
UpperCamelCase_ = self.all_model_classes[1]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = self.all_model_classes[1]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ = True
UpperCamelCase_ = True
UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase )
UpperCamelCase_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_UpperCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Optional[int]:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _UpperCAmelCase ( self ) -> List[str]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
UpperCamelCase_ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
UpperCamelCase_ = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
UpperCamelCase_ = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
UpperCamelCase_ = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval()
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
UpperCamelCase_ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
# masks_queries_logits
UpperCamelCase_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCamelCase_ = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
UpperCamelCase_ = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
# class_queries_logits
UpperCamelCase_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
UpperCamelCase_ = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval()
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))], segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)], return_tensors="pt", )
UpperCamelCase_ = inputs['pixel_values'].to(_UpperCAmelCase )
UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['mask_labels']]
UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['class_labels']]
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
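
# Usage note (added, illustrative): beyond the raw-loss assertion above, the image
# processor also exposes post-processing helpers. A hedged sketch -- the checkpoint
# name and the surrounding `image`/`model` variables are assumptions here:
#
#   processor = MaskaFormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
#   inputs = processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)
#   predictions = processor.post_process_instance_segmentation(outputs, target_sizes=[image.size[::-1]])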
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Iterate through each branch of the state space tree using DFS, printing a
    permutation whenever the end of the sequence is reached."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
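
# Cross-check (added, illustrative): a collecting variant of the same backtracking,
# verified against itertools.permutations.
import itertools


def _collect_permutations(sequence):
    results = []

    def backtrack(current, used):
        if len(current) == len(sequence):
            results.append(tuple(current))
            return
        for i in range(len(sequence)):
            if not used[i]:
                used[i] = True
                current.append(sequence[i])
                backtrack(current, used)
                current.pop()
                used[i] = False

    backtrack([], [False] * len(sequence))
    return results


assert sorted(_collect_permutations([3, 1, 2, 4])) == sorted(itertools.permutations([3, 1, 2, 4]))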
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
'''states''',
'''actions''',
'''rewards''',
'''returns_to_go''',
'''timesteps''',
'''attention_mask''',
]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, done, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
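
# A minimal, framework-free sketch of the same autoregressive bookkeeping (an added example;
# `fake_policy` and `sketch_rollout` are illustrative stand-ins, not part of the test above):
# each step queries the policy, then decrements the return-to-go target by the observed reward.
def fake_policy(states, actions, returns_to_go):
    # stand-in for the Decision Transformer: "predict" a constant action
    return 0.5


def sketch_rollout(num_steps=2, target_return=10.0):
    states, actions, rewards, returns_to_go = [0.0], [], [], [target_return]
    for _ in range(num_steps):
        actions.append(fake_policy(states, actions, returns_to_go))
        reward = 1.0  # would come from env.step(action)
        rewards.append(reward)
        states.append(0.0)  # would be the next observation
        returns_to_go.append(returns_to_go[-1] - reward)  # shrink the return target
    return states, actions, rewards, returns_to_go


# sketch_rollout() leaves returns_to_go == [10.0, 9.0, 8.0] after two unit rewards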
| 561 |
demo_graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find a shortest path between the `start` and `goal` nodes of an unweighted graph."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest-path distance (number of edges) between `start` and `target`."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
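
    # Sanity check tying the two functions together: the first goal-reaching path found by
    # the queue-of-paths BFS has exactly `distance` edges, since paths leave the queue in
    # non-decreasing length order.
    assert len(bfs_shortest_path(demo_graph, "G", "D")) - 1 == bfs_shortest_path_distance(demo_graph, "G", "D") == 4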
| 561 | 1 |
"""simple docstring"""
def lowercase__ ( lowerCAmelCase : Any ) -> int:
"""simple docstring"""
UpperCAmelCase = [False] * len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase = [-1] * len(_SCREAMING_SNAKE_CASE )
def dfs(lowerCAmelCase : Any , lowerCAmelCase : Optional[int] ):
UpperCAmelCase = True
UpperCAmelCase = c
for u in graph[v]:
if not visited[u]:
dfs(_SCREAMING_SNAKE_CASE , 1 - c )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if not visited[i]:
dfs(_SCREAMING_SNAKE_CASE , 0 )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
SCREAMING_SNAKE_CASE_ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
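
# A small follow-up example: a triangle (odd cycle) cannot be 2-colored, so the check fails.
odd_cycle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(odd_cycle))  # False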
| 373 |
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Return both roots of ax^2 + bx + c = 0 (as plain floats when the roots are real)."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
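
# Worked examples (a short added sketch): real roots when the discriminant is non-negative,
# complex roots otherwise; cmath.sqrt handles both cases uniformly.
#   x^2 - 3x + 2 = 0  ->  delta = 9 - 8 = 1   ->  roots 2.0 and 1.0
#   x^2 + 1 = 0       ->  delta = -4          ->  roots 1j and -1j
assert quadratic_roots(1, -3, 2) == (2.0, 1.0)
assert quadratic_roots(1, 0, 1) == (1j, -1j)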
def main() -> None:
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution1} and {solution2}")


if __name__ == "__main__":
    main() | 585 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
def _lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase = tempfile.mkdtemp()
lowercase = BlipImageProcessor()
lowercase = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-BertModel''' )
lowercase = BlipProcessor(a , a )
processor.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self : Any , **a : Union[str, Any] ) -> Any:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **a ).tokenizer
def _lowerCAmelCase ( self : Tuple , **a : Any ) -> Tuple:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **a ).image_processor
def _lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
        lowercase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
lowercase = [Image.fromarray(np.moveaxis(a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
lowercase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowercase = self.get_image_processor(do_normalize=a , padding_value=1.0 )
lowercase = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a )
def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = BlipProcessor(tokenizer=a , image_processor=a )
lowercase = self.prepare_image_inputs()
lowercase = image_processor(a , return_tensors='''np''' )
lowercase = processor(images=a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = BlipProcessor(tokenizer=a , image_processor=a )
lowercase = '''lower newer'''
lowercase = processor(text=a )
lowercase = tokenizer(a , return_token_type_ids=a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = BlipProcessor(tokenizer=a , image_processor=a )
lowercase = '''lower newer'''
lowercase = self.prepare_image_inputs()
lowercase = processor(text=a , images=a )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(a ):
processor()
def _lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = BlipProcessor(tokenizer=a , image_processor=a )
lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase = processor.batch_decode(a )
lowercase = tokenizer.batch_decode(a )
self.assertListEqual(a , a )
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = BlipProcessor(tokenizer=a , image_processor=a )
lowercase = '''lower newer'''
lowercase = self.prepare_image_inputs()
lowercase = processor(text=a , images=a )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] ) | 716 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 396 | 0 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
"""simple docstring"""
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
        tgt_text = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
assert tgt_text == decoded
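
# A self-contained sketch of the property that check_use_cache_forward exercises above:
# decoding with a carried cache must match recomputing from the full prefix. The "model"
# here is just a running sum, an illustrative stand-in, not a real decoder.
def _forward_from_scratch(tokens):
    # recompute the whole prefix from scratch for the latest position
    return sum(tokens)


def _forward_with_cache(token, cache):
    # reuse the running state (the analogue of past_key_values) instead of revisiting the prefix
    return cache + token


_cache = 0.0
_tokens = [1.0, 2.0, 3.0]
for _i, _t in enumerate(_tokens):
    _cache = _forward_with_cache(_t, _cache)
    assert abs(_cache - _forward_from_scratch(_tokens[: _i + 1])) < 1e-3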
| 575 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) ->np.ndarray:
"""simple docstring"""
if (ksize % 2) == 0:
__lowercase : Union[str, Any] = ksize + 1
__lowercase : Union[str, Any] = np.zeros((ksize, ksize), dtype=np.floataa )
# each value
for y in range(_lowerCamelCase ):
for x in range(_lowerCamelCase ):
# distance from center
__lowercase : Union[str, Any] = x - ksize // 2
__lowercase : List[Any] = y - ksize // 2
# degree to radiant
__lowercase : int = theta / 1_80 * np.pi
__lowercase : int = np.cos(_theta )
__lowercase : List[str] = np.sin(_theta )
# get kernel x
__lowercase : Dict = cos_theta * px + sin_theta * py
# get kernel y
__lowercase : Any = -sin_theta * px + cos_theta * py
# fill kernel
__lowercase : List[str] = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__A : List[Any] = imread('../image_data/lena.jpg')
# turn image in gray scale value
__A : List[str] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__A : int = np.zeros(gray.shape[:2])
for theta in [0, 3_0, 6_0, 9_0, 1_2_0, 1_5_0]:
__A : Dict = gabor_filter_kernel(1_0, 8, theta, 1_0, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__A : str = out / out.max() * 2_5_5
__A : Dict = out.astype(np.uinta)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
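
# A short hedged sanity check of gabor_filter_kernel (an added example, not part of the
# original script): with psi = 0 both the Gaussian envelope and the cosine carrier are even
# functions of the center offsets, so the kernel is symmetric under a 180-degree rotation.
_k = gabor_filter_kernel(9, 8, 45, 10, 0, 0)
assert np.allclose(_k, np.rot90(_k, 2))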
| 575 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ,unittest.TestCase ):
'''simple docstring'''
__a : Optional[int] = KandinskyVaaImgaImgPipeline
__a : Dict = ["image_embeds", "negative_image_embeds", "image"]
__a : Union[str, Any] = [
"image_embeds",
"negative_image_embeds",
"image",
]
__a : str = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__a : Union[str, Any] = False
@property
def A ( self : Optional[int] ) -> str:
'''simple docstring'''
return 3_2
@property
def A ( self : Any ) -> List[str]:
'''simple docstring'''
return 3_2
@property
def A ( self : Dict ) -> List[str]:
'''simple docstring'''
return self.time_input_dim
@property
def A ( self : Dict ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def A ( self : List[str] ) -> Tuple:
'''simple docstring'''
return 1_0_0
@property
def A ( self : Tuple ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
UpperCamelCase__ = UNetaDConditionModel(**UpperCamelCase__ )
return model
@property
def A ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ = VQModel(**self.dummy_movq_kwargs )
return model
def A ( self : Optional[Any] ) -> Any:
'''simple docstring'''
UpperCamelCase__ = self.dummy_unet
UpperCamelCase__ = self.dummy_movq
UpperCamelCase__ = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0_0_8_5,
"""beta_end""": 0.0_1_2,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
UpperCamelCase__ = DDIMScheduler(**UpperCamelCase__ )
UpperCamelCase__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def A ( self : int , lowercase : Optional[Any] , lowercase : Dict=0 ) -> Any:
'''simple docstring'''
UpperCamelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
UpperCamelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCamelCase__ )
# create init_image
UpperCamelCase__ = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCamelCase__ = Image.fromarray(np.uint8(UpperCamelCase__ ) ).convert("""RGB""" ).resize((2_5_6, 2_5_6) )
if str(UpperCamelCase__ ).startswith("""mps""" ):
UpperCamelCase__ = torch.manual_seed(UpperCamelCase__ )
else:
UpperCamelCase__ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
UpperCamelCase__ = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""num_inference_steps""": 1_0,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def A ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = """cpu"""
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = self.pipeline_class(**UpperCamelCase__ )
UpperCamelCase__ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
UpperCamelCase__ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
UpperCamelCase__ = output.images
UpperCamelCase__ = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
UpperCamelCase__ = image[0, -3:, -3:, -1]
UpperCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase__ = np.array(
[0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Any ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : str ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
UpperCamelCase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
UpperCamelCase__ = """A red cartoon frog, 4k"""
UpperCamelCase__ = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase__ )
UpperCamelCase__ = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
UpperCamelCase__ = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
UpperCamelCase__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCamelCase__ , UpperCamelCase__ = pipe_prior(
UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
UpperCamelCase__ = pipeline(
image=UpperCamelCase__ , image_embeds=UpperCamelCase__ , negative_image_embeds=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type="""np""" , )
UpperCamelCase__ = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
| 700 |
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """`Enum` holding the available verification modes."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum don't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: int) -> bool:
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
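
# A minimal usage sketch (commented out because this module uses relative imports and is
# not meant to run standalone): record a file's size and sha256, then spot-check the values.
#
#   import tempfile
#   with tempfile.NamedTemporaryFile(delete=False) as tmp:
#       tmp.write(b"hello")
#   info = get_size_checksum_dict(tmp.name)
#   assert info["num_bytes"] == 5
#   assert info["checksum"] == sha256(b"hello").hexdigest()
#   os.unlink(tmp.name)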
| 265 | 0 |
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract start/end timestamps and the duration (in minutes) from a single job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
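
# Pagination note: the GitHub API returns at most 100 jobs per request, so after the first
# page there are math.ceil((total_count - 100) / 100) more to fetch. For example, a run with
# 250 jobs needs ceil(150 / 100) = 2 extra requests (pages 2 and 3).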
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}') | 498 |
"""simple docstring"""
import sys
def __lowerCamelCase ( a_ : List[Any] ) -> str:
__SCREAMING_SNAKE_CASE :str = len(a_ )
__SCREAMING_SNAKE_CASE :List[Any] = [[0 for x in range(a_ )] for x in range(a_ )]
__SCREAMING_SNAKE_CASE :Any = [[0 for x in range(a_ )] for x in range(a_ )]
for chain_length in range(2 , a_ ):
for a in range(1 , n - chain_length + 1 ):
__SCREAMING_SNAKE_CASE :int = a + chain_length - 1
__SCREAMING_SNAKE_CASE :List[Any] = sys.maxsize
for c in range(a_ , a_ ):
__SCREAMING_SNAKE_CASE :Optional[Any] = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
__SCREAMING_SNAKE_CASE :str = cost
__SCREAMING_SNAKE_CASE :str = c
return matrix, sol
def __lowerCamelCase ( a_ : Optional[Any] , a_ : Any , a_ : List[Any] ) -> List[str]:
if i == j:
print('''A''' + str(a_ ) , end=''' ''' )
else:
print('''(''' , end=''' ''' )
print_optiomal_solution(a_ , a_ , optimal_solution[i][j] )
print_optiomal_solution(a_ , optimal_solution[i][j] + 1 , a_ )
print(''')''' , end=''' ''' )
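
# Worked example for the driver below: with dims [30, 35, 15, 5, 10, 20, 25] the optimal
# parenthesization is ((A1(A2A3))((A4A5)A6)) and its cost can be checked by hand:
#   A2A3: 35*15*5 = 2625,  A1(A2A3): 30*35*5 = 5250,
#   A4A5: 5*10*20 = 1000,  (A4A5)A6: 5*20*25 = 2500,  final: 30*5*25 = 3750
#   total: 2625 + 5250 + 1000 + 2500 + 3750 = 15125 scalar multiplications.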
def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
    main() | 498 | 1 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
__SCREAMING_SNAKE_CASE = True
except (ImportError, ModuleNotFoundError):
__SCREAMING_SNAKE_CASE = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def A_ ( __lowercase ):
re.sub('<n>' , '' , __lowercase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__lowercase ) )
| 395 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
    _import_structure["modeling_maskformer_swin"] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 395 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
f'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
f'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCamelCase = Image.open(requests.get(_lowercase ,stream=_lowercase ).raw )
return im
@torch.no_grad()
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=False ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = get_detr_config(_lowercase )
# load original model from torch hub
UpperCamelCase = {
'''detr-resnet-50''': '''detr_resnet50''',
'''detr-resnet-101''': '''detr_resnet101''',
}
logger.info(f'Converting model {model_name}...' )
UpperCamelCase = torch.hub.load('''facebookresearch/detr''' ,model_name_to_original_name[model_name] ,pretrained=_lowercase ).eval()
UpperCamelCase = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(_lowercase ):
if is_panoptic:
UpperCamelCase = '''detr.''' + src
rename_key(_lowercase ,_lowercase ,_lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowercase ,is_panoptic=_lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCamelCase = '''detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
UpperCamelCase = state_dict.pop(_lowercase )
UpperCamelCase = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
UpperCamelCase = state_dict.pop(_lowercase )
UpperCamelCase = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
UpperCamelCase = state_dict.pop(_lowercase )
UpperCamelCase = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
UpperCamelCase = state_dict.pop(_lowercase )
UpperCamelCase = val
# finally, create HuggingFace model and load state dict
UpperCamelCase = DetrForSegmentation(_lowercase ) if is_panoptic else DetrForObjectDetection(_lowercase )
model.load_state_dict(_lowercase )
model.eval()
# verify our conversion on an image
UpperCamelCase = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
UpperCamelCase = DetrImageProcessor(format=_lowercase )
UpperCamelCase = processor(images=prepare_img() ,return_tensors='''pt''' )
UpperCamelCase = encoding['''pixel_values''']
UpperCamelCase = detr(_lowercase )
UpperCamelCase = model(_lowercase )
assert torch.allclose(outputs.logits ,original_outputs['''pred_logits'''] ,atol=1e-3 )
assert torch.allclose(outputs.pred_boxes ,original_outputs['''pred_boxes'''] ,atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks ,original_outputs['''pred_masks'''] ,atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('''Uploading PyTorch model and image processor to the hub...''' )
model.push_to_hub(f'nielsr/{model_name}' )
processor.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 34 |
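A stand-alone sketch of the fused-projection split performed above, assuming DETR's hidden size of 256 (names here are illustrative, not from the script): PyTorch's MultiheadAttention stores query, key and value as one (3*256, 256) matrix plus bias, and the conversion slices them into three 256-row blocks.
import torch

hidden_size = 256  # DETR's d_model, matching the 256-wide slices above
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
in_proj_bias = torch.randn(3 * hidden_size)
# query / key / value occupy consecutive row blocks of the fused projection
q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_w = in_proj_weight[-hidden_size:, :]
q_b = in_proj_bias[:hidden_size]
k_b = in_proj_bias[hidden_size : 2 * hidden_size]
v_b = in_proj_bias[-hidden_size:]
assert q_w.shape == k_w.shape == v_w.shape == (hidden_size, hidden_size)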
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = analyze_text(_lowercase )
UpperCamelCase = list(''' ''' + ascii_lowercase )
# what is our total sum of probabilities.
UpperCamelCase = sum(single_char_strings.values() )
# one length string
UpperCamelCase = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
UpperCamelCase = single_char_strings[ch]
UpperCamelCase = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'{round(-1 * my_fir_sum ):.1f}' )
# two len string
UpperCamelCase = sum(two_char_strings.values() )
UpperCamelCase = 0
# for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            UpperCamelCase = cha + chb
            if sequence in two_char_strings:
                UpperCamelCase = two_char_strings[sequence]
                UpperCamelCase = int(my_str ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = Counter() # type: ignore
UpperCamelCase = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 ,len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __snake_case ( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main() | 34 | 1 |
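For reference, a plain sketch of the first-order entropy the snippet computes, H = -Σ p · log2(p) over character frequencies (the digit-masked math.loga above stands for math.log2; the helper name here is hypothetical):
import math
from collections import Counter

def first_order_entropy(text: str) -> float:
    counts = Counter(text)
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())

print(f"{first_order_entropy('abracadabra'):.3f}")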
def A ( lowercase ) -> int:
'''simple docstring'''
stooge(lowercase , 0 , len(lowercase ) - 1 )
return arr
def A ( lowercase , lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
if i >= h:
return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
        UpperCamelCase = int((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(lowercase , lowercase , (h - t) )
# Recursively sort last 2/3 elements
stooge(lowercase , i + t , (lowercase) )
# Recursively sort first 2/3 elements
stooge(lowercase , lowercase , (h - t) )
if __name__ == "__main__":
_UpperCAmelCase : int = input("Enter numbers separated by a comma:\n").strip()
_UpperCAmelCase : Optional[Any] = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
| 3 |
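A compact, runnable rendering of the same algorithm for comparison (hypothetical helper name; assumes a list of mutually comparable items):
def stooge_sort_ref(arr, i=0, h=None):
    if h is None:
        h = len(arr) - 1
    if i >= h:
        return arr
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        stooge_sort_ref(arr, i, h - t)  # first two thirds
        stooge_sort_ref(arr, i + t, h)  # last two thirds
        stooge_sort_ref(arr, i, h - t)  # first two thirds again
    return arr

assert stooge_sort_ref([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]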
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Any = logging.get_logger(__name__)
_UpperCAmelCase : Tuple = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_UpperCAmelCase : List[str] = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
},
}
_UpperCAmelCase : Optional[int] = {
"camembert-base": 512,
}
_UpperCAmelCase : Union[str, Any] = "▁"
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : str = VOCAB_FILES_NAMES
__lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : List[str] = ["input_ids", "attention_mask"]
__lowercase : Tuple = CamembertTokenizer
def __init__( self , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=["<s>NOTUSED", "</s>NOTUSED"] , **A_ , ) -> List[Any]:
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
A_ , tokenizer_file=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , additional_special_tokens=A_ , **A_ , )
UpperCamelCase = vocab_file
UpperCamelCase = False if not self.vocab_file else True
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
copyfile(self.vocab_file , A_ )
return (out_vocab_file,)
| 3 | 1 |
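The pair layout produced by build_inputs_with_special_tokens above is CamemBERT's <s> A </s></s> B </s>; in miniature, with illustrative token ids:
cls_id, sep_id = 5, 6  # illustrative ids, not camembert-base's real ones
ids_a, ids_b = [10, 11], [20, 21]
pair = [cls_id] + ids_a + [sep_id] + [sep_id] + ids_b + [sep_id]
assert pair == [5, 10, 11, 6, 6, 20, 21, 6]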
"""simple docstring"""
def lowercase (_snake_case ,_snake_case ) -> float:
'''simple docstring'''
_validate_point(_snake_case )
_validate_point(_snake_case )
if len(_snake_case ) != len(_snake_case ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(_snake_case ,_snake_case ) ) )
def lowercase (_snake_case ) -> None:
'''simple docstring'''
if point:
if isinstance(_snake_case ,_snake_case ):
for item in point:
if not isinstance(_snake_case ,(int, float) ):
__UpperCamelCase = (
"Expected a list of numbers as input, found "
f"""{type(_snake_case ).__name__}"""
)
raise TypeError(_snake_case )
else:
__UpperCamelCase = f"""Expected a list of numbers as input, found {type(_snake_case ).__name__}"""
raise TypeError(_snake_case )
else:
raise ValueError("Missing an input" )
def lowercase (_snake_case ,_snake_case ) -> float:
'''simple docstring'''
_validate_point(_snake_case )
_validate_point(_snake_case )
if len(_snake_case ) != len(_snake_case ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(_snake_case ,_snake_case ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 505 |
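Both helpers reduce to d(p, q) = Σ |p_i - q_i|; a quick sanity check of that identity:
p, q = [1, 1], [9, 9]
assert sum(abs(a - b) for a, b in zip(p, q)) == 16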
"""simple docstring"""
import os
def lowercase (_snake_case = "matrix.txt" ) -> int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(_snake_case ) ,_snake_case ) ) as in_file:
__UpperCamelCase = in_file.read()
__UpperCamelCase = [[int(_snake_case ) for cell in row.split("," )] for row in data.strip().splitlines()]
__UpperCamelCase = [[0 for cell in row] for row in grid]
__UpperCamelCase = len(grid[0] )
__UpperCamelCase = [[0 for i in range(_snake_case )] for j in range(_snake_case )]
__UpperCamelCase = grid[0][0]
for i in range(1 ,_snake_case ):
__UpperCamelCase = grid[0][i] + dp[0][i - 1]
for i in range(1 ,_snake_case ):
__UpperCamelCase = grid[i][0] + dp[i - 1][0]
for i in range(1 ,_snake_case ):
for j in range(1 ,_snake_case ):
__UpperCamelCase = grid[i][j] + min(dp[i - 1][j] ,dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(f"""{solution() = }""") | 505 | 1 |
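The recurrence above, dp[i][j] = grid[i][j] + min(dp[i-1][j], dp[i][j-1]), traced on a toy 2x2 grid (right/down moves only):
grid = [[1, 3], [2, 4]]
dp = [[0, 0], [0, 0]]
dp[0][0] = grid[0][0]
dp[0][1] = grid[0][1] + dp[0][0]  # only reachable from the left
dp[1][0] = grid[1][0] + dp[0][0]  # only reachable from above
dp[1][1] = grid[1][1] + min(dp[0][1], dp[1][0])
assert dp[1][1] == 7  # cheapest path is 1 -> 2 -> 4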
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Optional[Any] = logging.get_logger(__name__)
_A : str = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
_UpperCAmelCase : str = "unispeech"
def __init__( self : Dict , A : Any=3_2 , A : Optional[int]=7_6_8 , A : Optional[int]=1_2 , A : Optional[Any]=1_2 , A : Optional[Any]=3_0_7_2 , A : Union[str, Any]="gelu" , A : Tuple=0.1 , A : Dict=0.1 , A : Optional[int]=0.1 , A : List[str]=0.0 , A : List[str]=0.0 , A : Tuple=0.1 , A : int=0.1 , A : Any=0.02 , A : int=1e-5 , A : Dict="group" , A : Optional[int]="gelu" , A : Dict=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , A : Dict=(5, 2, 2, 2, 2, 2, 2) , A : List[str]=(1_0, 3, 3, 3, 3, 2, 2) , A : List[Any]=False , A : List[str]=1_2_8 , A : Optional[int]=1_6 , A : int=False , A : Any=True , A : Any=0.05 , A : List[Any]=1_0 , A : Tuple=2 , A : List[str]=0.0 , A : Any=1_0 , A : str=0 , A : Union[str, Any]=3_2_0 , A : Optional[int]=2 , A : str=0.1 , A : Tuple=1_0_0 , A : Union[str, Any]=2_5_6 , A : Optional[int]=2_5_6 , A : List[str]=0.1 , A : Tuple="mean" , A : Dict=False , A : Tuple=False , A : Optional[Any]=2_5_6 , A : Optional[int]=8_0 , A : Tuple=0 , A : Any=1 , A : List[Any]=2 , A : str=0.5 , **A : Optional[Any] , ) ->Tuple:
super().__init__(**A , pad_token_id=A , bos_token_id=A , eos_token_id=A )
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : Dict = feat_extract_norm
lowerCamelCase__ : Optional[int] = feat_extract_activation
lowerCamelCase__ : Dict = list(A )
lowerCamelCase__ : Tuple = list(A )
lowerCamelCase__ : Tuple = list(A )
lowerCamelCase__ : List[Any] = conv_bias
lowerCamelCase__ : str = num_conv_pos_embeddings
lowerCamelCase__ : Any = num_conv_pos_embedding_groups
lowerCamelCase__ : int = len(self.conv_dim )
lowerCamelCase__ : str = num_hidden_layers
lowerCamelCase__ : Optional[Any] = intermediate_size
lowerCamelCase__ : List[Any] = hidden_act
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Tuple = hidden_dropout
lowerCamelCase__ : List[Any] = attention_dropout
lowerCamelCase__ : List[Any] = activation_dropout
lowerCamelCase__ : int = feat_proj_dropout
lowerCamelCase__ : Optional[Any] = final_dropout
lowerCamelCase__ : List[Any] = layerdrop
lowerCamelCase__ : List[Any] = layer_norm_eps
lowerCamelCase__ : str = initializer_range
lowerCamelCase__ : Any = num_ctc_classes
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : int = do_stable_layer_norm
lowerCamelCase__ : Dict = use_weighted_layer_sum
lowerCamelCase__ : str = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase__ : Tuple = apply_spec_augment
lowerCamelCase__ : Union[str, Any] = mask_time_prob
lowerCamelCase__ : Tuple = mask_time_length
lowerCamelCase__ : int = mask_time_min_masks
lowerCamelCase__ : List[Any] = mask_feature_prob
lowerCamelCase__ : Dict = mask_feature_length
lowerCamelCase__ : List[str] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCamelCase__ : List[Any] = num_codevectors_per_group
lowerCamelCase__ : Optional[int] = num_codevector_groups
lowerCamelCase__ : Union[str, Any] = contrastive_logits_temperature
lowerCamelCase__ : Optional[int] = feat_quantizer_dropout
lowerCamelCase__ : Optional[Any] = num_negatives
lowerCamelCase__ : Dict = codevector_dim
lowerCamelCase__ : List[str] = proj_codevector_dim
lowerCamelCase__ : str = diversity_loss_weight
# ctc loss
lowerCamelCase__ : List[Any] = ctc_loss_reduction
lowerCamelCase__ : Any = ctc_zero_infinity
# pretraining loss
lowerCamelCase__ : Any = replace_prob
@property
def __lowerCamelCase ( self : int ) ->Any:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 130 |
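The closing property multiplies the conv strides, which gives the waveform-to-frame downsampling factor; with the default strides above:
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
assert functools.reduce(operator.mul, conv_stride, 1) == 320  # 320 audio samples per output frame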
import numpy as np
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] ) ->int:
lowerCamelCase__ : Optional[Any] = (0, 0)
lowerCamelCase__ : Dict = None
lowerCamelCase__ : Optional[int] = 0
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Union[str, Any] = 0
def __eq__( self : Optional[int] , A : Optional[Any] ) ->List[Any]:
return self.position == cell.position
def __lowerCamelCase ( self : List[str] ) ->int:
print(self.position )
class __SCREAMING_SNAKE_CASE :
def __init__( self : str , A : List[str]=(5, 5) ) ->Optional[int]:
lowerCamelCase__ : int = np.zeros(A )
lowerCamelCase__ : Optional[int] = world_size[0]
lowerCamelCase__ : Optional[int] = world_size[1]
def __lowerCamelCase ( self : List[str] ) ->List[str]:
print(self.w )
def __lowerCamelCase ( self : Union[str, Any] , A : str ) ->Optional[Any]:
lowerCamelCase__ : Any = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
lowerCamelCase__ : List[Any] = cell.position[0]
lowerCamelCase__ : Union[str, Any] = cell.position[1]
lowerCamelCase__ : int = []
for n in neughbour_cord:
lowerCamelCase__ : Tuple = current_x + n[0]
lowerCamelCase__ : Optional[Any] = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
lowerCamelCase__ : List[Any] = Cell()
lowerCamelCase__ : Tuple = (x, y)
lowerCamelCase__ : List[Any] = cell
neighbours.append(A )
return neighbours
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
"""simple docstring"""
lowerCamelCase__ : Dict = []
lowerCamelCase__ : List[str] = []
_open.append(UpperCAmelCase )
while _open:
lowerCamelCase__ : Any = np.argmin([n.f for n in _open] )
lowerCamelCase__ : List[str] = _open[min_f]
_closed.append(_open.pop(UpperCAmelCase ) )
if current == goal:
break
for n in world.get_neigbours(UpperCAmelCase ):
for c in _closed:
if c == n:
continue
lowerCamelCase__ : Any = current.g + 1
lowerCamelCase__ , lowerCamelCase__ : str = n.position
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = goal.position
            # h: squared Euclidean distance from the node to the goal
            lowerCamelCase__ : Optional[Any] = (ya - yb) ** 2 + (xa - xb) ** 2
lowerCamelCase__ : List[Any] = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(UpperCAmelCase )
lowerCamelCase__ : List[Any] = []
while current.parent is not None:
path.append(current.position )
lowerCamelCase__ : int = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
_A : Dict = Gridworld()
# Start position and goal
_A : Any = Cell()
_A : int = (0, 0)
_A : Optional[int] = Cell()
_A : Tuple = (4, 4)
print(F'''path from {start.position} to {goal.position}''')
_A : int = astar(world, start, goal)
# Just for visual reasons.
for i in s:
_A : List[Any] = 1
print(world.w)
| 130 | 1 |
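The open-list selection above picks the node minimizing f = g + h, where g is the path cost so far and h is the squared Euclidean distance to the goal; worked for one node:
g = 3  # path cost accumulated so far
x1, y1 = 1, 1  # node position
x2, y2 = 4, 4  # goal position
h = (y2 - y1) ** 2 + (x2 - x1) ** 2
assert g + h == 21  # the f value used to rank the node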
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
a__ : Any = get_logger(__name__)
class UpperCAmelCase_ ( enum.Enum ):
__lowerCAmelCase : List[Any] = """all_checks"""
__lowerCAmelCase : str = """basic_checks"""
__lowerCAmelCase : str = """no_checks"""
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
pass
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
pass
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
pass
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
pass
def UpperCAmelCase_ ( _UpperCAmelCase :Optional[dict] , _UpperCAmelCase :dict , _UpperCAmelCase :str=None ) -> int:
'''simple docstring'''
if expected_checksums is None:
logger.info('''Unable to verify checksums.''' )
return
if len(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) )
if len(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) > 0:
raise UnexpectedDownloadedFile(str(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) )
A_ = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
A_ = ''' for ''' + verification_name if verification_name is not None else ''''''
if len(_UpperCAmelCase ) > 0:
raise NonMatchingChecksumError(
f'Checksums didn\'t match{for_verification_name}:\n'
f'{bad_urls}\n'
'''Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error''' )
logger.info('''All the checksums matched successfully''' + for_verification_name )
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
pass
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
pass
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
pass
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
pass
def UpperCAmelCase_ ( _UpperCAmelCase :Optional[dict] , _UpperCAmelCase :dict ) -> str:
'''simple docstring'''
if expected_splits is None:
logger.info('''Unable to verify splits sizes.''' )
return
if len(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) > 0:
raise ExpectedMoreSplits(str(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) )
if len(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) > 0:
raise UnexpectedSplits(str(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) )
A_ = [
{'''expected''': expected_splits[name], '''recorded''': recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(_UpperCAmelCase ) > 0:
raise NonMatchingSplitsSizesError(str(_UpperCAmelCase ) )
logger.info('''All the splits matched successfully.''' )
def UpperCAmelCase_ ( _UpperCAmelCase :str , _UpperCAmelCase :bool = True ) -> dict:
'''simple docstring'''
if record_checksum:
A_ = shaaaa()
with open(_UpperCAmelCase , '''rb''' ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , b'''''' ):
m.update(_UpperCAmelCase )
A_ = m.hexdigest()
else:
A_ = None
return {"num_bytes": os.path.getsize(_UpperCAmelCase ), "checksum": checksum}
def UpperCAmelCase_ ( _UpperCAmelCase :Optional[int] ) -> Any:
'''simple docstring'''
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 188 |
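A plain sketch of the chunked hashing loop above, assuming the digit-masked shaaaa stands for hashlib.sha256 (the helper name is hypothetical):
import hashlib

def file_sha256(path: str) -> str:
    m = hashlib.sha256()
    with open(path, "rb") as f:
        # 1 MiB chunks, so large downloads are never loaded fully into memory
        for chunk in iter(lambda: f.read(1 << 20), b""):
            m.update(chunk)
    return m.hexdigest()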
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = (KDPMaDiscreteScheduler,)
__lowerCAmelCase : Union[str, Any] = 10
def __UpperCAmelCase ( self ,**__snake_case ):
"""simple docstring"""
A_ = {
'''num_train_timesteps''': 1_1_0_0,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**__snake_case )
return config
def __UpperCAmelCase ( self ):
"""simple docstring"""
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__snake_case )
def __UpperCAmelCase ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] ,[0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=__snake_case ,beta_end=__snake_case )
def __UpperCAmelCase ( self ):
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__snake_case )
def __UpperCAmelCase ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__snake_case )
def __UpperCAmelCase ( self ):
"""simple docstring"""
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(prediction_type='''v_prediction''' )
A_ = scheduler_class(**__snake_case )
scheduler.set_timesteps(self.num_inference_steps )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter * scheduler.init_noise_sigma
A_ = sample.to(__snake_case )
for i, t in enumerate(scheduler.timesteps ):
A_ = scheduler.scale_model_input(__snake_case ,__snake_case )
A_ = model(__snake_case ,__snake_case )
A_ = scheduler.step(__snake_case ,__snake_case ,__snake_case )
A_ = output.prev_sample
A_ = torch.sum(torch.abs(__snake_case ) )
A_ = torch.mean(torch.abs(__snake_case ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.69_34E-07 ) < 1E-2
assert abs(result_mean.item() - 6.11_12E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0002 ) < 1E-3
def __UpperCAmelCase ( self ):
"""simple docstring"""
if torch_device == "mps":
return
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**__snake_case )
scheduler.set_timesteps(self.num_inference_steps )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter * scheduler.init_noise_sigma
A_ = sample.to(__snake_case )
for i, t in enumerate(scheduler.timesteps ):
A_ = scheduler.scale_model_input(__snake_case ,__snake_case )
A_ = model(__snake_case ,__snake_case )
A_ = scheduler.step(__snake_case ,__snake_case ,__snake_case )
A_ = output.prev_sample
A_ = torch.sum(torch.abs(__snake_case ) )
A_ = torch.mean(torch.abs(__snake_case ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
def __UpperCAmelCase ( self ):
"""simple docstring"""
if torch_device == "mps":
return
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**__snake_case )
scheduler.set_timesteps(self.num_inference_steps ,device=__snake_case )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter.to(__snake_case ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
A_ = scheduler.scale_model_input(__snake_case ,__snake_case )
A_ = model(__snake_case ,__snake_case )
A_ = scheduler.step(__snake_case ,__snake_case ,__snake_case )
A_ = output.prev_sample
A_ = torch.sum(torch.abs(__snake_case ) )
A_ = torch.mean(torch.abs(__snake_case ) )
if str(__snake_case ).startswith('''cpu''' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
| 188 | 1 |
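The sampling loop those tests drive, assuming diffusers' KDPM2DiscreteScheduler (digit-masked to KDPMaDiscreteScheduler above) and a zero-predicting stand-in for the model:
import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)
sample = torch.ones(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a real UNet
    sample = scheduler.step(noise_pred, t, sample).prev_sample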
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class a__( __lowerCAmelCase ):
a_ : torch.FloatTensor
a_ : torch.FloatTensor
class a__( __lowerCAmelCase , __lowerCAmelCase ):
a_ : Dict = 1
@register_to_config
def __init__( self , _UpperCAmelCase = 2000 , _UpperCAmelCase = 0.15 , _UpperCAmelCase = 0.01 , _UpperCAmelCase = 1_348.0 , _UpperCAmelCase = 1E-5 , _UpperCAmelCase = 1 , ) -> str:
# standard deviation of the initial noise distribution
snake_case__ =sigma_max
# setable values
snake_case__ =None
self.set_sigmas(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> torch.FloatTensor:
return sample
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None ) -> Optional[Any]:
snake_case__ =sampling_eps if sampling_eps is not None else self.config.sampling_eps
snake_case__ =torch.linspace(1 , lowerCAmelCase_ , lowerCAmelCase_ , device=lowerCAmelCase_ )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None ) -> int:
snake_case__ =sigma_min if sigma_min is not None else self.config.sigma_min
snake_case__ =sigma_max if sigma_max is not None else self.config.sigma_max
snake_case__ =sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCAmelCase_ , lowerCAmelCase_ )
snake_case__ =sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
snake_case__ =torch.exp(torch.linspace(math.log(lowerCAmelCase_ ) , math.log(lowerCAmelCase_ ) , lowerCAmelCase_ ) )
snake_case__ =torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> int:
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = True , ) -> Union[SdeVeOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
snake_case__ =timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
snake_case__ =(timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
snake_case__ =timesteps.to(self.discrete_sigmas.device )
snake_case__ =self.discrete_sigmas[timesteps].to(sample.device )
snake_case__ =self.get_adjacent_sigma(lowerCAmelCase_ , lowerCAmelCase_ ).to(sample.device )
snake_case__ =torch.zeros_like(lowerCAmelCase_ )
snake_case__ =(sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
snake_case__ =diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
snake_case__ =diffusion.unsqueeze(-1 )
snake_case__ =drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
snake_case__ =randn_tensor(
sample.shape , layout=sample.layout , generator=lowerCAmelCase_ , device=sample.device , dtype=sample.dtype )
snake_case__ =sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
snake_case__ =prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCAmelCase_ , prev_sample_mean=lowerCAmelCase_ )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = True , ) -> Union[SchedulerOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
snake_case__ =randn_tensor(sample.shape , layout=sample.layout , generator=lowerCAmelCase_ ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
snake_case__ =torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
snake_case__ =torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
snake_case__ =(self.config.snr * noise_norm / grad_norm) ** 2 * 2
snake_case__ =step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
snake_case__ =step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
snake_case__ =step_size.unsqueeze(-1 )
snake_case__ =sample + step_size * model_output
snake_case__ =prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase_ )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
snake_case__ =timesteps.to(original_samples.device )
snake_case__ =self.discrete_sigmas.to(original_samples.device )[timesteps]
snake_case__ =(
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCAmelCase_ ) * sigmas[:, None, None, None]
)
snake_case__ =noise + original_samples
return noisy_samples
def __len__( self ) -> Tuple:
return self.config.num_train_timesteps
| 717 |
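The diffusion coefficient in step_pred above discretizes the variance-exploding SDE, with g(t)^2 taken as the gap between adjacent noise levels, sigma_t^2 - sigma_{t-1}^2; in isolation:
import torch

sigma, adjacent_sigma = torch.tensor(2.0), torch.tensor(1.5)
diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5  # g(t) for this step
assert abs(diffusion.item() - 1.75**0.5) < 1e-6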
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class a__:
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase="None" , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ) -> List[Any]:
snake_case__ =parent
snake_case__ =batch_size
snake_case__ =seq_length
snake_case__ =is_training
snake_case__ =use_input_mask
snake_case__ =use_token_type_ids
snake_case__ =use_labels
snake_case__ =vocab_size
snake_case__ =hidden_size
snake_case__ =num_hidden_layers
snake_case__ =num_attention_heads
snake_case__ =intermediate_size
snake_case__ =hidden_act
snake_case__ =hidden_dropout_prob
snake_case__ =attention_probs_dropout_prob
snake_case__ =max_position_embeddings
snake_case__ =type_vocab_size
snake_case__ =type_sequence_label_size
snake_case__ =initializer_range
snake_case__ =num_labels
snake_case__ =num_choices
snake_case__ =relative_attention
snake_case__ =position_biased_input
snake_case__ =pos_att_type
snake_case__ =scope
def _lowercase ( self ) -> str:
snake_case__ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ =None
if self.use_input_mask:
snake_case__ =random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ =None
if self.use_token_type_ids:
snake_case__ =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ =None
snake_case__ =None
snake_case__ =None
if self.use_labels:
snake_case__ =ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ =DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
snake_case__ =TFDebertaVaModel(config=_UpperCAmelCase )
snake_case__ ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
snake_case__ =[input_ids, input_mask]
snake_case__ =model(_UpperCAmelCase )
snake_case__ =model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int:
snake_case__ =TFDebertaVaForMaskedLM(config=_UpperCAmelCase )
snake_case__ ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
snake_case__ =model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
snake_case__ =self.num_labels
snake_case__ =TFDebertaVaForSequenceClassification(config=_UpperCAmelCase )
snake_case__ ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
snake_case__ =model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
snake_case__ =self.num_labels
snake_case__ =TFDebertaVaForTokenClassification(config=_UpperCAmelCase )
snake_case__ ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
snake_case__ =model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str:
snake_case__ =TFDebertaVaForQuestionAnswering(config=_UpperCAmelCase )
snake_case__ ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
snake_case__ =model(_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self ) -> str:
snake_case__ =self.prepare_config_and_inputs()
        (
            snake_case__,
            snake_case__,
            snake_case__,
            snake_case__,
            snake_case__,
            snake_case__,
            snake_case__,
        ) = config_and_inputs
snake_case__ ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class a__( snake_case__ , snake_case__ , unittest.TestCase ):
a_ : Optional[Any] = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
a_ : List[Any] = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ : str = False
a_ : Any = False
def _lowercase ( self ) -> str:
snake_case__ =TFDebertaVaModelTester(self )
snake_case__ =ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def _lowercase ( self ) -> int:
self.config_tester.run_common_tests()
def _lowercase ( self ) -> Optional[Any]:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _lowercase ( self ) -> Union[str, Any]:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def _lowercase ( self ) -> Optional[int]:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def _lowercase ( self ) -> Dict:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def _lowercase ( self ) -> int:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def _lowercase ( self ) -> List[str]:
snake_case__ =TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
self.assertIsNotNone(_UpperCAmelCase )
@require_tf
class a__( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def _lowercase ( self ) -> Optional[Any]:
pass
@slow
def _lowercase ( self ) -> Dict:
snake_case__ =TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
snake_case__ =tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
snake_case__ =tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
snake_case__ =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
snake_case__ =tf.constant(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1E-4 )
| 581 | 0 |
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
__lowercase : Optional[Any] = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
__lowercase : Optional[int] = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
__lowercase : int = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def SCREAMING_SNAKE_CASE ( snake_case, snake_case, snake_case, snake_case, snake_case = None, snake_case = False, ):
if label_map is not None:
for old_id, new_id in label_map.items():
__snake_case = new_id
# turn into Numpy arrays
__snake_case = np.array(A__)
__snake_case = np.array(A__)
if reduce_labels:
__snake_case = 2_55
__snake_case = label - 1
__snake_case = 2_55
__snake_case = label != ignore_index
__snake_case = np.not_equal(A__, A__)
__snake_case = pred_label[mask]
__snake_case = np.array(A__)[mask]
__snake_case = pred_label[pred_label == label]
__snake_case = np.histogram(A__, bins=A__, range=(0, num_labels - 1))[0]
__snake_case = np.histogram(A__, bins=A__, range=(0, num_labels - 1))[0]
__snake_case = np.histogram(A__, bins=A__, range=(0, num_labels - 1))[0]
__snake_case = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def SCREAMING_SNAKE_CASE ( snake_case, snake_case, snake_case, snake_case, snake_case = None, snake_case = False, ):
__snake_case = np.zeros((num_labels,), dtype=np.floataa)
__snake_case = np.zeros((num_labels,), dtype=np.floataa)
__snake_case = np.zeros((num_labels,), dtype=np.floataa)
__snake_case = np.zeros((num_labels,), dtype=np.floataa)
for result, gt_seg_map in zip(A__, A__):
__snake_case , __snake_case , __snake_case , __snake_case = intersect_and_union(
A__, A__, A__, A__, A__, A__)
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def SCREAMING_SNAKE_CASE ( snake_case, snake_case, snake_case, snake_case, snake_case = None, snake_case = None, snake_case = False, ):
__snake_case , __snake_case , __snake_case , __snake_case = total_intersect_and_union(
A__, A__, A__, A__, A__, A__)
# compute metrics
__snake_case = {}
__snake_case = total_area_intersect.sum() / total_area_label.sum()
__snake_case = total_area_intersect / total_area_union
__snake_case = total_area_intersect / total_area_label
__snake_case = np.nanmean(A__)
__snake_case = np.nanmean(A__)
__snake_case = all_acc
__snake_case = iou
__snake_case = acc
if nan_to_num is not None:
__snake_case = {metric: np.nan_to_num(A__, nan=A__) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
"""simple docstring"""
def lowercase ( self : str ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
} ) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
def lowercase ( self : str , A_ : List[str] , A_ : List[Any] , A_ : Any , A_ : Optional[int] , A_ : List[Any] = None , A_ : Optional[Any] = None , A_ : str = False , ) -> Optional[int]:
__snake_case = mean_iou(
results=_snake_case , gt_seg_maps=_snake_case , num_labels=_snake_case , ignore_index=_snake_case , nan_to_num=_snake_case , label_map=_snake_case , reduce_labels=_snake_case , )
return iou_result | 564 |
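Per class, intersect_and_union above reduces to |pred ∩ gt| / |pred ∪ gt|; a toy binary-mask check:
import numpy as np

pred = np.array([[1, 1], [0, 0]])
gt = np.array([[1, 0], [1, 0]])
inter = np.logical_and(pred == 1, gt == 1).sum()
union = np.logical_or(pred == 1, gt == 1).sum()
assert inter / union == 1 / 3  # IoU for class 1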
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
__magic_name__ = logging.get_logger(__name__)
def _lowerCAmelCase ( A__: str=None , A__: List[Any]=None ):
'''simple docstring'''
return field(default_factory=lambda: default , metadata=A__ )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = list_field(
default=[] , metadata={
"""help""": (
"""Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"""
""" of all available models"""
)
} , )
__SCREAMING_SNAKE_CASE = list_field(
default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""} )
__SCREAMING_SNAKE_CASE = list_field(
default=[8, 32, 128, 512] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""} )
__SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """Use FP16 to accelerate inference."""} )
__SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """Benchmark training of model"""} )
__SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """Verbose memory tracing"""} )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."""} , )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={
"""help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"""
} , )
__SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """Trace memory line by line"""} )
__SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """Save result to a CSV file"""} )
__SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """Save all print statements in a log file"""} )
__SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """Whether to print environment information"""} )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={
"""help""": (
"""Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"""
""" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"""
""" for debugging / testing and on TPU."""
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=F'''inference_time_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving time results to csv."""} , )
__SCREAMING_SNAKE_CASE = field(
default=F'''inference_memory_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , )
__SCREAMING_SNAKE_CASE = field(
default=F'''train_time_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , )
__SCREAMING_SNAKE_CASE = field(
default=F'''train_memory_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , )
__SCREAMING_SNAKE_CASE = field(
default=F'''env_info_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving environment information."""} , )
__SCREAMING_SNAKE_CASE = field(
default=F'''log_{round(time() )}.csv''' , metadata={"""help""": """Log filename used if print statements are saved in log."""} , )
__SCREAMING_SNAKE_CASE = field(default=3 , metadata={"""help""": """Times an experiment will be run."""} )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={
"""help""": (
"""Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"""
""" model weights."""
)
} , )
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
warnings.warn(
f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
''' to benchmark Transformer models.''' , _snake_case , )
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
| 254 | 0 |
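The list_field helper above is the standard dataclasses workaround for mutable defaults: a default_factory closure instead of a class-level list shared by all instances. In miniature:
from dataclasses import dataclass, field

@dataclass
class BenchArgs:  # hypothetical, for illustration only
    batch_sizes: list = field(default_factory=lambda: [8])  # fresh list per instance

assert BenchArgs().batch_sizes is not BenchArgs().batch_sizes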
'''simple docstring'''
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    """simple docstring"""
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=125, additional_special_tokens=None, **kwargs) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens" )
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs, )
        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits
        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def vocab_size(self) -> int:
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added." )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def _tokenize(self, text: str) -> List[str]:
        # one token per UTF-8 byte
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens
    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id
    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token
    def convert_tokens_to_string(self, tokens):
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
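# Usage sketch (illustrative, based on the class as reconstructed above): byte-level ids
# are raw UTF-8 byte values offset by the three special tokens.
# tok = ByT5Tokenizer()
# tok._tokenize("hi")                                          # ['h', 'i']
# [tok._convert_token_to_id(t) for t in tok._tokenize("hi")]   # [107, 108] == [ord('h') + 3, ord('i') + 3]
# tok.convert_tokens_to_string(tok._tokenize("héllo"))         # 'héllo' (multi-byte chars round-trip)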
| 719 |
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(nth: int = 10001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
    print(f"""{solution() = }""")
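# Worked check: solution(6) == 13 (the sixth prime), and the full run solution() == 104743,
# the widely published answer for the 10001st prime (Project Euler 7).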
| 243 | 0 |
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """simple docstring"""
    return AutoConfig.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """simple docstring"""
    return AutoTokenizer.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """simple docstring"""
    return AutoModel.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """simple docstring"""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """simple docstring"""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """simple docstring"""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """simple docstring"""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
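# Usage sketch: with entry points like these in a hubconf.py, models load through torch.hub.
# The repo path below is illustrative, not taken from this file:
# import torch
# model = torch.hub.load('huggingface/pytorch-transformers', 'modelForSequenceClassification', 'bert-base-cased')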
| 690 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)
    def __len__(self):
        return self.length
    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True
    def forward(self, x=None):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}')
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True
    def forward(self, x=None):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}')
            self.first_batch = False
        return x * self.a + self.b
def get_dataloaders(accelerator, batch_size=16):
    """simple docstring"""
    from datasets import load_dataset
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    data_files = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
    datasets = load_dataset('csv', data_files=data_files)
    label_list = datasets['train'].unique('label')
    label_to_id = {v: i for i, v in enumerate(label_list)}
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['sentence1'], examples['sentence2'], truncation=True, max_length=None, padding='max_length')
        if "label" in examples:
            outputs['labels'] = [label_to_id[l] for l in examples['label']]
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['sentence1', 'sentence2', 'label'], )
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')
    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
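# Minimal usage sketch (assumes the MRPC CSV fixtures referenced above exist on disk):
# from accelerate import Accelerator
# accelerator = Accelerator()
# train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size=16)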
| 690 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}
class NllbMoeConfig(PretrainedConfig):
    model_type = 'nllb-moe'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__(self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all", normalize_router_prob_before_dropping=False, batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, )
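# Example: the defaults above describe the released 54B checkpoint; a small config for quick
# experiments can override them (the values below are illustrative only):
# config = NllbMoeConfig(d_model=64, encoder_layers=2, decoder_layers=2, num_experts=4, expert_capacity=8)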
| 718 |
"""simple docstring"""
def decimal_to_fraction(decimal):
    '''simple docstring'''
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError('Please enter a valid number')
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split('.')[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # reduce with Euclid's algorithm
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
    print(f"""{decimal_to_fraction(2) = }""")
    print(f"""{decimal_to_fraction(89.0) = }""")
    print(f"""{decimal_to_fraction('67') = }""")
    print(f"""{decimal_to_fraction('45.0') = }""")
    print(f"""{decimal_to_fraction(1.5) = }""")
    print(f"""{decimal_to_fraction('6.25') = }""")
    print(f"""{decimal_to_fraction('78td') = }""")
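# Worked example: 6.25 has fractional part 0.25 and two fractional digits, giving 625/100;
# Euclid's algorithm yields gcd(625, 100) = 25, so the reduced fraction is (25, 4).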
| 133 | 0 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
    import accelerate
def apply_forward_hook(method):
    """simple docstring"""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse('0.17.0'):
        return method
    def wrapper(self, *args, **kwargs):
        if hasattr(self, '_hf_hook') and hasattr(self._hf_hook, 'pre_forward'):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)
    return wrapper
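# Usage sketch (hypothetical class, for illustration): without an accelerate hook attached
# the decorator is a no-op; with one (e.g. from CPU offloading), pre_forward runs first.
# class TinyModule:
#     @apply_forward_hook
#     def encode(self, x):
#         return x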
| 284 |
'''simple docstring'''
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """simple docstring"""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1)
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
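# Worked example: knapsack([1, 3, 5], [10, 20, 100], 3, 5, 0) == 100 -- the single
# weight-5 item beats combining the weight-1 and weight-3 items (10 + 20 = 30).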
| 284 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_efficientformer': [
        'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EfficientFormerConfig',
    ]
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['image_processing_efficientformer'] = ['EfficientFormerImageProcessor']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_efficientformer'] = [
        'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EfficientFormerForImageClassification',
        'EfficientFormerForImageClassificationWithTeacher',
        'EfficientFormerModel',
        'EfficientFormerPreTrainedModel',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_efficientformer'] = [
        'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFEfficientFormerForImageClassification',
        'TFEfficientFormerForImageClassificationWithTeacher',
        'TFEfficientFormerModel',
        'TFEfficientFormerPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
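# With this pattern, e.g. `from transformers.models.efficientformer import EfficientFormerConfig`
# only imports the configuration submodule on first attribute access, via _LazyModule.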
| 501 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    '''simple docstring'''
    return 1.0 / (1.0 + np.exp(-_outputs))
def softmax(_outputs):
    '''simple docstring'''
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    '''simple docstring'''
    SIGMOID = '''sigmoid'''
    SOFTMAX = '''softmax'''
    NONE = '''none'''
@add_end_docstrings(
    PIPELINE_INIT_ARGS ,r'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' ,)
class TextClassificationPipeline(Pipeline):
    '''simple docstring'''
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING)
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, '''return_all_scores''') and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k, int) or top_k is None:
            postprocess_params['''top_k'''] = top_k
            postprocess_params['''_legacy'''] = False
        elif return_all_scores is not None:
            warnings.warn(
                '''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'''
                ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''', UserWarning, )
            if return_all_scores:
                postprocess_params['''top_k'''] = None
            else:
                postprocess_params['''top_k'''] = 1
        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params['''function_to_apply'''] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = '''top_k''' not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'''
                ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''')
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, '''function_to_apply''') and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs['''logits'''][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''')
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {'''label''': self.model.config.id2label[i], '''score''': score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
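# Usage sketch (downloads a checkpoint; the default model depends on your transformers version):
# from transformers import pipeline
# classifier = pipeline('text-classification')
# classifier('This is great!')              # [{'label': ..., 'score': ...}]
# classifier('This is great!', top_k=None)  # all labels, sorted by score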
| 501 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ['bert-base-uncased', 'bert-base-cased']
TINY_MODEL_CHECKPOINT = 'hf-internal-testing/tiny-bert-tf-only'
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        """simple docstring"""
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)
        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    """simple docstring"""
    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors='''tf''', padding='''longest''')
                tf_outputs = tf_tokenizer(test_inputs)
                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))
    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences], text_pair=[sentence[1] for sentence in self.paired_sentences], )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
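# What these tests exercise, in brief (network access assumed):
# tf_tokenizer = TFBertTokenizer.from_pretrained('bert-base-uncased')
# outputs = tf_tokenizer(tf.constant(['hello world']))  # dict of tf.Tensors, usable inside tf.function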
| 14 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    '''simple docstring'''
    def process(self, sample: float) -> float:
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    '''simple docstring'''
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    '''simple docstring'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    '''simple docstring'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phase, -2 * pi))
    plt.show()
| 557 | 0 |
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
    import doctest
    doctest.testmod()
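# For reference, solution() == 26241 is the widely published Project Euler 58 answer: the
# first spiral side length where primes occupy less than 10% of the diagonal values.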
| 700 |
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(ceiling: int = 1_00_00_00) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
    print(f'''{solution() = }''')
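# For reference, solution() == 997651 is the widely published Project Euler 50 answer,
# reached as a sum of 543 consecutive primes starting at 7.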
| 144 | 0 |
'''simple docstring'''
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float, ) -> tuple[str, float]:
    """simple docstring"""
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError('You cannot supply more or less than 2 values')
    elif stress < 0:
        raise ValueError('Stress cannot be negative')
    elif tangential_force < 0:
        raise ValueError('Tangential Force cannot be negative')
    elif area < 0:
        raise ValueError('Area cannot be negative')
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
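# Worked examples: with stress left as the unknown (0), shear_stress(0, 100, 20) returns
# ('stress', 5.0); with area as the unknown, shear_stress(25, 100, 0) returns ('area', 4.0).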
| 430 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    """simple docstring"""
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model')
        qformer_tokenizer = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert')
        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """This creates a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer(), )
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor['qformer_' + key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()), ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'], )
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()), ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'], )
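# Shape of a combined call, as asserted in the tests above:
# inputs = processor(text='lower newer', images=image_input)
# list(inputs.keys())  # ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values']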
| 430 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        """simple docstring"""
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def test_check_illegal_input(self):
        """simple docstring"""
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here
    def test_example_progression(self):
        """simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
    def test_example_progression_unequal_three_mid_and_reset(self):
        """simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])
        dc.reset()
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
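# Condensed example of the branch semantics tested above:
# dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
# dc.update(1); dc.update(2)                 # both branches still alive
# stepped, completed, reset = dc.update(4)   # completed is True: one branch fulfilled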
| 604 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])
        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()
    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])
    def __len__(self):
        return len(self.lengths)
    def check(self):
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f'Splitting {sum(indices)} too long sequences.')
        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]
        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
        else:
            cls_id, sep_id = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])
        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.')
    def remove_unknown_sequences(self):
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids['unk_token']
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).')
    def print_statistics(self):
        if not self.params.is_master:
            return
        logger.info(f'{len(self)} sequences')
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)
        # Max for paddings
        max_seq_len_ = max(lengths)
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids['pad_token']
        else:
            pad_idx = self.params.special_tok_ids['unk_token']
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)
        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
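# Usage sketch (`params` is a hypothetical namespace carrying the attributes used above:
# max_model_input_size, mlm, special_tok_ids, is_master):
# dataset = LmSeqsDataset(params, data=list_of_token_id_arrays)
# loader = torch.utils.data.DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)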
| 604 | 1 |
'''simple docstring'''
def permute(nums: list[int]) -> list[list[int]]:
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result
def permute2(nums):
    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack
    output = []
    backtrack(0)
    return output
if __name__ == "__main__":
    import doctest
    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
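# Worked example: permute2([1, 2, 3]) yields the six permutations in swap order:
# [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]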
| 396 | '''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device('cpu')
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv', '.point_wise_conv')
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv', '.depth_wise_conv')
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.', '.proj.')
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed', 'swiftformer.patch_embed.patch_embedding')
        if "network" in k_new:
            ls = k_new.split('.')
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:])
            else:
                k_new = k_new.replace('network', 'swiftformer.encoder.network')
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location='cpu')
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('preprocessor_config')
    inputs = processor(images=image, return_tensors='pt')
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs['pixel_values']).logits
    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_UpperCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
_UpperCamelCase : Union[str, Any] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
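# Example invocation (script file name and checkpoint path are illustrative):
# python convert_swiftformer.py --swiftformer_name swiftformer_xs \
#     --pytorch_dump_folder_path ./converted_outputs/ --original_ckpt /path/to/swiftformer_xs.pth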
| 396 | 1 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    '''simple docstring'''
    sample: torch.FloatTensor
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : int , _lowerCamelCase : str=3 , _lowerCamelCase : Dict=3 , _lowerCamelCase : Optional[int]=("DownEncoderBlock2D",) , _lowerCamelCase : List[str]=(6_4,) , _lowerCamelCase : List[str]=2 , _lowerCamelCase : Optional[int]=3_2 , _lowerCamelCase : int="silu" , _lowerCamelCase : str=True , ):
'''simple docstring'''
super().__init__()
__lowerCamelCase : List[str] = layers_per_block
__lowerCamelCase : Optional[int] = torch.nn.Convad(
_lowerCamelCase , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
__lowerCamelCase : Any = None
__lowerCamelCase : Optional[int] = nn.ModuleList([] )
# down
__lowerCamelCase : Dict = block_out_channels[0]
for i, down_block_type in enumerate(_lowerCamelCase ):
__lowerCamelCase : Optional[Any] = output_channel
__lowerCamelCase : Dict = block_out_channels[i]
__lowerCamelCase : List[Any] = i == len(_lowerCamelCase ) - 1
__lowerCamelCase : Dict = get_down_block(
_lowerCamelCase , num_layers=self.layers_per_block , in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=_lowerCamelCase , resnet_groups=_lowerCamelCase , attention_head_dim=_lowerCamelCase , temb_channels=_lowerCamelCase , )
self.down_blocks.append(_lowerCamelCase )
# mid
__lowerCamelCase : str = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=_lowerCamelCase , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=_lowerCamelCase , temb_channels=_lowerCamelCase , )
# out
__lowerCamelCase : Any = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=_lowerCamelCase , eps=1E-6 )
__lowerCamelCase : Optional[int] = nn.SiLU()
__lowerCamelCase : Dict = 2 * out_channels if double_z else out_channels
__lowerCamelCase : Tuple = nn.Convad(block_out_channels[-1] , _lowerCamelCase , 3 , padding=1 )
__lowerCamelCase : Any = False
def _snake_case ( self : Optional[int] , _lowerCamelCase : Tuple ):
'''simple docstring'''
__lowerCamelCase : Tuple = x
__lowerCamelCase : int = self.conv_in(_lowerCamelCase )
if self.training and self.gradient_checkpointing:
def create_custom_forward(_lowerCamelCase : Tuple ):
def custom_forward(*_lowerCamelCase : Any ):
return module(*_lowerCamelCase )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
__lowerCamelCase : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(_lowerCamelCase ) , _lowerCamelCase , use_reentrant=_lowerCamelCase )
# middle
__lowerCamelCase : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _lowerCamelCase , use_reentrant=_lowerCamelCase )
else:
for down_block in self.down_blocks:
__lowerCamelCase : List[str] = torch.utils.checkpoint.checkpoint(create_custom_forward(_lowerCamelCase ) , _lowerCamelCase )
# middle
__lowerCamelCase : str = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , _lowerCamelCase )
else:
# down
for down_block in self.down_blocks:
__lowerCamelCase : List[Any] = down_block(_lowerCamelCase )
# middle
__lowerCamelCase : int = self.mid_block(_lowerCamelCase )
# post-process
__lowerCamelCase : str = self.conv_norm_out(_lowerCamelCase )
__lowerCamelCase : int = self.conv_act(_lowerCamelCase )
__lowerCamelCase : Dict = self.conv_out(_lowerCamelCase )
return sample
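# Editor-added sketch (hypothetical helper, not diffusers API): the forward pass
# above wraps each block in a plain function so torch.utils.checkpoint can
# re-run it during backward instead of storing activations. A minimal,
# self-contained illustration with an assumed toy block:
def _checkpoint_pattern_sketch():
    import torch
    import torch.nn as nn
    from torch.utils.checkpoint import checkpoint

    toy_block = nn.Sequential(nn.Linear(8, 8), nn.ReLU())

    def create_custom_forward(module):
        def custom_forward(*inputs):
            return module(*inputs)

        return custom_forward

    x = torch.randn(2, 8, requires_grad=True)
    # activations are not stored here; they are recomputed during backward
    # (use_reentrant=False matches the torch >= 1.11 branch used above)
    y = checkpoint(create_custom_forward(toy_block), x, use_reentrant=False)
    y.sum().backward()
    return x.grad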
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : List[str]=3 , _lowerCamelCase : List[Any]=("UpDecoderBlock2D",) , _lowerCamelCase : str=(6_4,) , _lowerCamelCase : str=2 , _lowerCamelCase : int=3_2 , _lowerCamelCase : Union[str, Any]="silu" , _lowerCamelCase : Dict="group" , ):
'''simple docstring'''
super().__init__()
__lowerCamelCase : Optional[int] = layers_per_block
__lowerCamelCase : int = nn.Convad(
_lowerCamelCase , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
__lowerCamelCase : Tuple = None
__lowerCamelCase : Optional[Any] = nn.ModuleList([] )
__lowerCamelCase : Any = in_channels if norm_type == """spatial""" else None
# mid
__lowerCamelCase : Any = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=_lowerCamelCase , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=_lowerCamelCase , temb_channels=_lowerCamelCase , )
# up
__lowerCamelCase : List[Any] = list(reversed(_lowerCamelCase ) )
__lowerCamelCase : Tuple = reversed_block_out_channels[0]
for i, up_block_type in enumerate(_lowerCamelCase ):
__lowerCamelCase : Dict = output_channel
__lowerCamelCase : str = reversed_block_out_channels[i]
__lowerCamelCase : Tuple = i == len(_lowerCamelCase ) - 1
__lowerCamelCase : Tuple = get_up_block(
_lowerCamelCase , num_layers=self.layers_per_block + 1 , in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=_lowerCamelCase , resnet_groups=_lowerCamelCase , attention_head_dim=_lowerCamelCase , temb_channels=_lowerCamelCase , resnet_time_scale_shift=_lowerCamelCase , )
self.up_blocks.append(_lowerCamelCase )
__lowerCamelCase : Tuple = output_channel
# out
if norm_type == "spatial":
__lowerCamelCase : Union[str, Any] = SpatialNorm(block_out_channels[0] , _lowerCamelCase )
else:
__lowerCamelCase : int = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=_lowerCamelCase , eps=1E-6 )
__lowerCamelCase : int = nn.SiLU()
__lowerCamelCase : Dict = nn.Convad(block_out_channels[0] , _lowerCamelCase , 3 , padding=1 )
__lowerCamelCase : int = False
def _snake_case ( self : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple=None ):
'''simple docstring'''
__lowerCamelCase : List[Any] = z
__lowerCamelCase : Union[str, Any] = self.conv_in(_lowerCamelCase )
__lowerCamelCase : Dict = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(module ):
def custom_forward(*inputs ):
# close over the wrapped module so checkpoint() can re-run it in backward
return module(*inputs )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
__lowerCamelCase : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _lowerCamelCase , _lowerCamelCase , use_reentrant=False )
__lowerCamelCase : List[str] = sample.to(_lowerCamelCase )
# up
for up_block in self.up_blocks:
__lowerCamelCase : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(up_block ) , _lowerCamelCase , _lowerCamelCase , use_reentrant=False )
else:
# middle
__lowerCamelCase : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _lowerCamelCase , _lowerCamelCase )
__lowerCamelCase : Optional[Any] = sample.to(_lowerCamelCase )
# up
for up_block in self.up_blocks:
__lowerCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block ) , _lowerCamelCase , _lowerCamelCase )
else:
# middle
__lowerCamelCase : Dict = self.mid_block(_lowerCamelCase , _lowerCamelCase )
__lowerCamelCase : Optional[int] = sample.to(_lowerCamelCase )
# up
for up_block in self.up_blocks:
__lowerCamelCase : Any = up_block(_lowerCamelCase , _lowerCamelCase )
# post-process
if latent_embeds is None:
__lowerCamelCase : Any = self.conv_norm_out(_lowerCamelCase )
else:
__lowerCamelCase : Any = self.conv_norm_out(_lowerCamelCase , _lowerCamelCase )
__lowerCamelCase : List[str] = self.conv_act(_lowerCamelCase )
__lowerCamelCase : str = self.conv_out(_lowerCamelCase )
return sample
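# Editor-added note: when norm_type == "spatial", conv_norm_out is a SpatialNorm
# conditioned on latent_embeds, which is why the post-processing above branches
# on whether latent embeddings were passed in.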
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : int="random" , _lowerCamelCase : List[Any]=False , _lowerCamelCase : Union[str, Any]=True ):
'''simple docstring'''
super().__init__()
__lowerCamelCase : str = n_e
__lowerCamelCase : Optional[Any] = vq_embed_dim
__lowerCamelCase : Tuple = beta
__lowerCamelCase : int = legacy
__lowerCamelCase : Tuple = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
__lowerCamelCase : Optional[Any] = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
__lowerCamelCase : Union[str, Any] = self.used.shape[0]
__lowerCamelCase : Tuple = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__lowerCamelCase : int = self.re_embed
__lowerCamelCase : Tuple = self.re_embed + 1
print(
F"""Remapping {self.n_e} indices to {self.re_embed} indices. """
F"""Using {self.unknown_index} for unknown indices.""" )
else:
__lowerCamelCase : Dict = n_e
__lowerCamelCase : Union[str, Any] = sane_index_shape
def _snake_case ( self : Tuple , _lowerCamelCase : int ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = inds.shape
assert len(_lowerCamelCase ) > 1
__lowerCamelCase : Optional[Any] = inds.reshape(ishape[0] , -1 )
__lowerCamelCase : List[Any] = self.used.to(_lowerCamelCase )
__lowerCamelCase : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long()
__lowerCamelCase : Tuple = match.argmax(-1 )
__lowerCamelCase : Union[str, Any] = match.sum(2 ) < 1
if self.unknown_index == "random":
__lowerCamelCase : List[Any] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
__lowerCamelCase : Any = self.unknown_index
return new.reshape(_lowerCamelCase )
def _snake_case ( self : List[Any] , _lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase : str = inds.shape
assert len(_lowerCamelCase ) > 1
__lowerCamelCase : str = inds.reshape(ishape[0] , -1 )
__lowerCamelCase : List[str] = self.used.to(_lowerCamelCase )
if self.re_embed > self.used.shape[0]: # extra token
__lowerCamelCase : Dict = 0 # simply set to zero
__lowerCamelCase : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , _lowerCamelCase )
return back.reshape(_lowerCamelCase )
def _snake_case ( self : Any , _lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase : Any = z.permute(0 , 2 , 3 , 1 ).contiguous()
__lowerCamelCase : Union[str, Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__lowerCamelCase : Union[str, Any] = torch.argmin(torch.cdist(_lowerCamelCase , self.embedding.weight ) , dim=1 )
__lowerCamelCase : List[str] = self.embedding(_lowerCamelCase ).view(z.shape )
__lowerCamelCase : Optional[Any] = None
__lowerCamelCase : Any = None
# compute loss for embedding
if not self.legacy:
__lowerCamelCase : List[str] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__lowerCamelCase : Tuple = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__lowerCamelCase : str = z + (z_q - z).detach()
# reshape back to match original input shape
__lowerCamelCase : Dict = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
__lowerCamelCase : Optional[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
__lowerCamelCase : List[str] = self.remap_to_used(_lowerCamelCase )
__lowerCamelCase : int = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
__lowerCamelCase : Dict = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _snake_case ( self : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] ):
'''simple docstring'''
if self.remap is not None:
__lowerCamelCase : List[str] = indices.reshape(shape[0] , -1 ) # add batch axis
__lowerCamelCase : str = self.unmap_to_all(_lowerCamelCase )
__lowerCamelCase : List[str] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__lowerCamelCase : int = self.embedding(_lowerCamelCase )
if shape is not None:
__lowerCamelCase : Dict = z_q.view(_lowerCamelCase )
# reshape back to match original input shape
__lowerCamelCase : str = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
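# Editor-added sketch (standalone, hypothetical names): the quantization step
# above in miniature - nearest-codebook lookup followed by the straight-through
# trick, z_q = z + (z_q - z).detach(), so the forward pass uses the quantized
# vector while gradients flow to z as if quantization were the identity.
def _vq_straight_through_sketch():
    import torch

    codebook = torch.randn(16, 4)  # n_e x vq_embed_dim
    z = torch.randn(8, 4, requires_grad=True)
    idx = torch.cdist(z, codebook).argmin(dim=1)  # nearest entry per row
    z_q = codebook[idx]
    z_q = z + (z_q - z).detach()  # straight-through estimator
    z_q.sum().backward()
    assert torch.allclose(z.grad, torch.ones_like(z))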
class _UpperCamelCase ( A ):
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int]=False ):
'''simple docstring'''
__lowerCamelCase : Tuple = parameters
__lowerCamelCase : Union[str, Any] = torch.chunk(_lowerCamelCase , 2 , dim=1 )
__lowerCamelCase : Dict = torch.clamp(self.logvar , -30.0 , 20.0 )
__lowerCamelCase : Dict = deterministic
__lowerCamelCase : List[Any] = torch.exp(0.5 * self.logvar )
__lowerCamelCase : Optional[int] = torch.exp(self.logvar )
if self.deterministic:
__lowerCamelCase : Optional[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def _snake_case ( self : List[Any] , _lowerCamelCase : Optional[torch.Generator] = None ):
'''simple docstring'''
__lowerCamelCase : str = randn_tensor(
self.mean.shape , generator=_lowerCamelCase , device=self.parameters.device , dtype=self.parameters.dtype )
__lowerCamelCase : Union[str, Any] = self.mean + self.std * sample
return x
def _snake_case ( self : List[str] , _lowerCamelCase : List[str]=None ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def _snake_case ( self : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any]=[1, 2, 3] ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
__lowerCamelCase : Any = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=_lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
return self.mean
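# Editor-added sketch of the math behind the diagonal Gaussian class above:
# reparameterized sampling, x = mu + sigma * eps, and the closed-form KL to a
# standard normal, KL = 0.5 * sum(mu^2 + var - 1 - logvar). Names are
# illustrative, not diffusers API.
def _diagonal_gaussian_sketch():
    import torch

    mean = torch.zeros(3)
    logvar = torch.zeros(3)
    std = torch.exp(0.5 * logvar)
    sample = mean + std * torch.randn(3)  # reparameterization trick
    kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar)
    assert torch.isclose(kl, torch.tensor(0.0))  # N(0, I) against itself
    return sample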
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCamelCase ( A,unittest.TestCase ):
'''simple docstring'''
a_ : Any = "ssube/stable-diffusion-x4-upscaler-onnx"
def _snake_case ( self : Any , _lowerCamelCase : List[str]=0 ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(_lowerCamelCase ) )
__lowerCamelCase : int = torch.manual_seed(_lowerCamelCase )
__lowerCamelCase : Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=None )
__lowerCamelCase : Tuple = self.get_dummy_inputs()
__lowerCamelCase : Dict = pipe(**_lowerCamelCase ).images
__lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCamelCase : str = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowerCamelCase : str = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
pipe.set_progress_bar_config(disable=None )
__lowerCamelCase : Tuple = self.get_dummy_inputs()
__lowerCamelCase : Union[str, Any] = pipe(**_lowerCamelCase ).images
__lowerCamelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCamelCase : int = np.array(
[0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowerCamelCase : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=None )
__lowerCamelCase : List[str] = self.get_dummy_inputs()
__lowerCamelCase : List[str] = pipe(**_lowerCamelCase ).images
__lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCamelCase : str = np.array(
[0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _snake_case ( self : str ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowerCamelCase : Optional[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=None )
__lowerCamelCase : Dict = self.get_dummy_inputs()
__lowerCamelCase : Dict = pipe(**_lowerCamelCase ).images
__lowerCamelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCamelCase : List[str] = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _snake_case ( self : Any ):
'''simple docstring'''
__lowerCamelCase : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowerCamelCase : Union[str, Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=None )
__lowerCamelCase : Optional[int] = self.get_dummy_inputs()
__lowerCamelCase : int = pipe(**_lowerCamelCase ).images
__lowerCamelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCamelCase : Optional[int] = np.array(
[0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def _snake_case ( self : str ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowerCamelCase : Any = ort.SessionOptions()
__lowerCamelCase : str = False
return options
def _snake_case ( self : Any ):
'''simple docstring'''
__lowerCamelCase : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__lowerCamelCase : Optional[Any] = init_image.resize((1_2_8, 1_2_8) )
# using the PNDM scheduler by default
__lowerCamelCase : List[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=None )
__lowerCamelCase : List[Any] = """A fantasy landscape, trending on artstation"""
__lowerCamelCase : str = torch.manual_seed(0 )
__lowerCamelCase : Any = pipe(
prompt=_lowerCamelCase , image=_lowerCamelCase , guidance_scale=7.5 , num_inference_steps=1_0 , generator=_lowerCamelCase , output_type="""np""" , )
__lowerCamelCase : List[str] = output.images
__lowerCamelCase : Any = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCamelCase : int = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _snake_case ( self : Any ):
'''simple docstring'''
__lowerCamelCase : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__lowerCamelCase : Union[str, Any] = init_image.resize((1_2_8, 1_2_8) )
__lowerCamelCase : str = LMSDiscreteScheduler.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" )
__lowerCamelCase : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=_lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=None )
__lowerCamelCase : str = """A fantasy landscape, trending on artstation"""
__lowerCamelCase : Tuple = torch.manual_seed(0 )
__lowerCamelCase : Dict = pipe(
prompt=_lowerCamelCase , image=_lowerCamelCase , guidance_scale=7.5 , num_inference_steps=2_0 , generator=_lowerCamelCase , output_type="""np""" , )
__lowerCamelCase : int = output.images
__lowerCamelCase : List[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCamelCase : List[str] = np.array(
[0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Optional[Any] = logging.get_logger(__name__)
A_ : Optional[Any] = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
a : Dict ='''distilbert'''
a : List[str] ={
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
def __init__( self , _lowerCamelCase=3_0_5_2_2 , _lowerCamelCase=5_1_2 , _lowerCamelCase=False , _lowerCamelCase=6 , _lowerCamelCase=1_2 , _lowerCamelCase=7_6_8 , _lowerCamelCase=4 * 7_6_8 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase="gelu" , _lowerCamelCase=0.0_2 , _lowerCamelCase=0.1 , _lowerCamelCase=0.2 , _lowerCamelCase=0 , **_lowerCamelCase , ):
UpperCamelCase_: Tuple = vocab_size
UpperCamelCase_: str = max_position_embeddings
UpperCamelCase_: Optional[int] = sinusoidal_pos_embds
UpperCamelCase_: Union[str, Any] = n_layers
UpperCamelCase_: Optional[int] = n_heads
UpperCamelCase_: int = dim
UpperCamelCase_: Tuple = hidden_dim
UpperCamelCase_: Any = dropout
UpperCamelCase_: Optional[Any] = attention_dropout
UpperCamelCase_: List[str] = activation
UpperCamelCase_: Optional[Any] = initializer_range
UpperCamelCase_: Optional[Any] = qa_dropout
UpperCamelCase_: List[str] = seq_classif_dropout
super().__init__(**_lowerCamelCase , pad_token_id=_lowerCamelCase )
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
@property
def _a ( self ):
if self.task == "multiple-choice":
UpperCamelCase_: Any = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase_: List[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
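# Editor-added note: the mapping above declares which ONNX input axes stay
# dynamic at export time, e.g. {0: "batch", 1: "sequence"} lets both batch size
# and sequence length vary at inference.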
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ : List[str] = logging.get_logger(__name__)
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> Union[str, Any]:
UpperCamelCase_: Tuple = b.T
UpperCamelCase_: Tuple = np.sum(np.square(UpperCAmelCase__ ) , axis=1 )
UpperCamelCase_: Optional[Any] = np.sum(np.square(UpperCAmelCase__ ) , axis=0 )
UpperCamelCase_: Optional[int] = np.matmul(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_: List[Any] = aa[:, None] - 2 * ab + ba[None, :]
return d
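# Editor-added check (standalone, hypothetical names) of the identity used
# above: ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, evaluated for all pairs at once.
def _squared_distance_sketch():
    import numpy as np

    a = np.random.rand(5, 3)
    b = np.random.rand(7, 3)
    aa = np.sum(a * a, axis=1)[:, None]  # ||a_i||^2 as a column
    bb = np.sum(b * b, axis=1)[None, :]  # ||b_j||^2 as a row
    d = aa + bb - 2 * a @ b.T            # broadcasted pairwise distances
    direct = np.sum((a[:, None, :] - b[None, :, :]) ** 2, axis=-1)
    assert np.allclose(d, direct)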
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[Any]:
UpperCamelCase_: List[str] = x.reshape(-1 , 3 )
UpperCamelCase_: Union[str, Any] = squared_euclidean_distance(UpperCAmelCase__ , UpperCAmelCase__ )
return np.argmin(UpperCAmelCase__ , axis=1 )
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
a : Any =['''pixel_values''']
def __init__( self , _lowerCamelCase = None , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = True , _lowerCamelCase = True , **_lowerCamelCase , ):
super().__init__(**_lowerCamelCase )
UpperCamelCase_: List[str] = size if size is not None else {'height': 2_5_6, 'width': 2_5_6}
UpperCamelCase_: str = get_size_dict(_lowerCamelCase )
UpperCamelCase_: Any = np.array(_lowerCamelCase ) if clusters is not None else None
UpperCamelCase_: Optional[int] = do_resize
UpperCamelCase_: List[Any] = size
UpperCamelCase_: Optional[int] = resample
UpperCamelCase_: str = do_normalize
UpperCamelCase_: str = do_color_quantize
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = None , **_lowerCamelCase , ):
UpperCamelCase_: Any = get_size_dict(_lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
_lowerCamelCase , size=(size['height'], size['width']) , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def _a ( self , _lowerCamelCase , _lowerCamelCase = None , ):
UpperCamelCase_: Optional[Any] = rescale(image=_lowerCamelCase , scale=1 / 1_2_7.5 , data_format=_lowerCamelCase )
UpperCamelCase_: Optional[Any] = image - 1
return image
def _a ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ):
UpperCamelCase_: Optional[Any] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase_: Tuple = size if size is not None else self.size
UpperCamelCase_: Union[str, Any] = get_size_dict(_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = resample if resample is not None else self.resample
UpperCamelCase_: Any = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase_: str = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
UpperCamelCase_: Dict = clusters if clusters is not None else self.clusters
UpperCamelCase_: Dict = np.array(_lowerCamelCase )
UpperCamelCase_: Optional[int] = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
UpperCamelCase_: Union[str, Any] = [to_numpy_array(_lowerCamelCase ) for image in images]
if do_resize:
UpperCamelCase_: Union[str, Any] = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images]
if do_normalize:
UpperCamelCase_: Optional[Any] = [self.normalize(image=_lowerCamelCase ) for image in images]
if do_color_quantize:
UpperCamelCase_: Any = [to_channel_dimension_format(_lowerCamelCase , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
UpperCamelCase_: Optional[Any] = np.array(_lowerCamelCase )
UpperCamelCase_: Optional[Any] = color_quantize(_lowerCamelCase , _lowerCamelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
UpperCamelCase_: Dict = images.shape[0]
UpperCamelCase_: Any = images.reshape(_lowerCamelCase , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
UpperCamelCase_: List[Any] = list(_lowerCamelCase )
else:
UpperCamelCase_: int = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images]
UpperCamelCase_: str = {'input_ids': images}
return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
def lowerCamelCase ( ) -> List[str]:
_lowerCamelCase = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
_lowerCamelCase = 6
_lowerCamelCase = 1
_lowerCamelCase = 19_01
_lowerCamelCase = 0
while year < 20_01:
day += 7
if (year % 4 == 0 and year % 1_00 != 0) or (year % 4_00 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
_lowerCamelCase = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
_lowerCamelCase = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
_lowerCamelCase = day - days_per_month[month - 2]
if month > 12:
year += 1
_lowerCamelCase = 1
if year < 20_01 and day == 1:
sundays += 1
return sundays
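# Editor-added cross-check (not part of the original solution): count the same
# first-of-month Sundays in 1901-2000 directly with the standard library; the
# result should match solution() (Project Euler 19's answer, 171).
def _verify_with_datetime():
    import datetime

    return sum(
        1
        for year in range(1901, 2001)
        for month in range(1, 13)
        if datetime.date(year, month, 1).weekday() == 6  # Monday=0, Sunday=6
    )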
if __name__ == "__main__":
print(solution())


import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
def lowerCamelCase ( UpperCamelCase : str ) -> List[str]:
_lowerCamelCase = SwinConfig.from_pretrained(
'microsoft/swin-tiny-patch4-window7-224' , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
_lowerCamelCase = MaskFormerConfig(backbone_config=UpperCamelCase )
_lowerCamelCase = 'huggingface/label-files'
if "ade20k-full" in model_name:
# this should be ok
_lowerCamelCase = 8_47
_lowerCamelCase = 'maskformer-ade20k-full-id2label.json'
elif "ade" in model_name:
# this should be ok
_lowerCamelCase = 1_50
_lowerCamelCase = 'ade20k-id2label.json'
elif "coco-stuff" in model_name:
# this should be ok
_lowerCamelCase = 1_71
_lowerCamelCase = 'maskformer-coco-stuff-id2label.json'
elif "coco" in model_name:
# TODO
_lowerCamelCase = 1_33
_lowerCamelCase = 'coco-panoptic-id2label.json'
elif "cityscapes" in model_name:
# this should be ok
_lowerCamelCase = 19
_lowerCamelCase = 'cityscapes-id2label.json'
elif "vistas" in model_name:
# this should be ok
_lowerCamelCase = 65
_lowerCamelCase = 'mapillary-vistas-id2label.json'
_lowerCamelCase = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type='dataset' ) , 'r' ) )
_lowerCamelCase = {int(k ): v for k, v in idalabel.items()}
return config
def lowerCamelCase ( UpperCamelCase : Any ) -> Any:
_lowerCamelCase = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def lowerCamelCase ( UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : Union[str, Any] ) -> Optional[Any]:
_lowerCamelCase = dct.pop(UpperCamelCase )
_lowerCamelCase = val
def lowerCamelCase ( UpperCamelCase : Dict , UpperCamelCase : List[Any] ) -> Union[str, Any]:
_lowerCamelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_lowerCamelCase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_lowerCamelCase = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
_lowerCamelCase = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase = in_proj_weight[:dim, :]
_lowerCamelCase = in_proj_bias[: dim]
_lowerCamelCase = in_proj_weight[
dim : dim * 2, :
]
_lowerCamelCase = in_proj_bias[
dim : dim * 2
]
_lowerCamelCase = in_proj_weight[
-dim :, :
]
_lowerCamelCase = in_proj_bias[-dim :]
# fmt: on
def lowerCamelCase ( UpperCamelCase : int , UpperCamelCase : Union[str, Any] ) -> str:
# fmt: off
_lowerCamelCase = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
_lowerCamelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase = in_proj_weight[: hidden_size, :]
_lowerCamelCase = in_proj_bias[:config.hidden_size]
_lowerCamelCase = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase = in_proj_weight[-hidden_size :, :]
_lowerCamelCase = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
_lowerCamelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase = in_proj_weight[: hidden_size, :]
_lowerCamelCase = in_proj_bias[:config.hidden_size]
_lowerCamelCase = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase = in_proj_weight[-hidden_size :, :]
_lowerCamelCase = in_proj_bias[-hidden_size :]
# fmt: on
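# Editor-added sketch of the splitting performed above: the original checkpoint
# stores each attention input projection as one fused matrix of shape (3*h, h),
# with query, key and value weights stacked row-wise, so thirds can be sliced off.
def _split_qkv_sketch():
    import torch

    h = 4
    in_proj_weight = torch.randn(3 * h, h)
    q = in_proj_weight[:h, :]
    k = in_proj_weight[h : 2 * h, :]
    v = in_proj_weight[-h:, :]
    assert torch.equal(torch.cat([q, k, v], dim=0), in_proj_weight)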
def lowerCamelCase ( ) -> torch.Tensor:
_lowerCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowerCamelCase = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase ( UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : bool = False ) -> Dict:
_lowerCamelCase = get_maskformer_config(UpperCamelCase )
# load original state_dict
with open(UpperCamelCase , 'rb' ) as f:
_lowerCamelCase = pickle.load(UpperCamelCase )
_lowerCamelCase = data['model']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_lowerCamelCase = create_rename_keys(UpperCamelCase )
for src, dest in rename_keys:
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
read_in_swin_q_k_v(UpperCamelCase , config.backbone_config )
read_in_decoder_q_k_v(UpperCamelCase , UpperCamelCase )
# update to torch tensors
for key, value in state_dict.items():
_lowerCamelCase = torch.from_numpy(UpperCamelCase )
# load 🤗 model
_lowerCamelCase = MaskFormerForInstanceSegmentation(UpperCamelCase )
model.eval()
for name, param in model.named_parameters():
print(UpperCamelCase , param.shape )
_lowerCamelCase , _lowerCamelCase = model.load_state_dict(UpperCamelCase , strict=False )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(UpperCamelCase ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
_lowerCamelCase = prepare_img()
if "vistas" in model_name:
_lowerCamelCase = 65
elif "cityscapes" in model_name:
_lowerCamelCase = 6_55_35
else:
_lowerCamelCase = 2_55
_lowerCamelCase = True if 'ade' in model_name else False
_lowerCamelCase = MaskFormerImageProcessor(ignore_index=UpperCamelCase , reduce_labels=UpperCamelCase )
_lowerCamelCase = image_processor(UpperCamelCase , return_tensors='pt' )
_lowerCamelCase = model(**UpperCamelCase )
print('Logits:' , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_lowerCamelCase = torch.tensor(
[[3.6_353, -4.4_770, -2.6_065], [0.5_081, -4.2_394, -3.5_343], [2.1_909, -5.0_353, -1.9_323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(UpperCamelCase ).mkdir(exist_ok=True )
model.save_pretrained(UpperCamelCase )
image_processor.save_pretrained(UpperCamelCase )
if push_to_hub:
print('Pushing model and image processor to the hub...' )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help='Name of the MaskFormer model you\'d like to convert',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
A = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class _a :
'''simple docstring'''
UpperCAmelCase__: Optional[Any] = BlenderbotConfig
UpperCAmelCase__: Any = {}
UpperCAmelCase__: Dict = '''gelu'''
def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=False , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__=0.1 , A__=0.1 , A__=20 , A__=2 , A__=1 , A__=0 , ):
A__ : Optional[Any] = parent
A__ : List[str] = batch_size
A__ : int = seq_length
A__ : int = is_training
A__ : Optional[int] = use_labels
A__ : Union[str, Any] = vocab_size
A__ : Optional[int] = hidden_size
A__ : Union[str, Any] = num_hidden_layers
A__ : Union[str, Any] = num_attention_heads
A__ : List[Any] = intermediate_size
A__ : Tuple = hidden_dropout_prob
A__ : List[str] = attention_probs_dropout_prob
A__ : Union[str, Any] = max_position_embeddings
A__ : str = eos_token_id
A__ : Dict = pad_token_id
A__ : str = bos_token_id
def __A ( self ):
A__ : Dict = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A__ : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A__ : Any = tf.concat([input_ids, eos_tensor] , axis=1 )
A__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A__ : int = prepare_blenderbot_inputs_dict(A__ , A__ , A__ )
return config, inputs_dict
def __A ( self , A__ , A__ ):
A__ : Tuple = TFBlenderbotModel(config=A__ ).get_decoder()
A__ : List[Any] = inputs_dict["""input_ids"""]
A__ : int = input_ids[:1, :]
A__ : Dict = inputs_dict["""attention_mask"""][:1, :]
A__ : List[Any] = inputs_dict["""head_mask"""]
A__ : str = 1
# first forward pass
A__ : Tuple = model(A__ , attention_mask=A__ , head_mask=A__ , use_cache=A__ )
A__ , A__ : Any = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : List[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
A__ : int = tf.concat([input_ids, next_tokens] , axis=-1 )
A__ : Optional[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A__ : Dict = model(A__ , attention_mask=A__ )[0]
A__ : Union[str, Any] = model(A__ , attention_mask=A__ , past_key_values=A__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A__ : List[str] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx]
A__ : Union[str, Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A__ , A__ , rtol=1e-3 )
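# Editor-added note: the check above exercises the key/value cache - decoding
# the appended tokens with past_key_values must reproduce the logits obtained by
# re-running the full concatenated sequence (compared on a random slice, rtol=1e-3).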
def UpperCamelCase (lowercase_: Dict , lowercase_: Dict , lowercase_: List[Any] , lowercase_: Tuple=None , lowercase_: List[Any]=None , lowercase_: List[Any]=None , lowercase_: Union[str, Any]=None , lowercase_: int=None , ) -> List[Any]:
if attention_mask is None:
A__ : str = tf.cast(tf.math.not_equal(lowercase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
A__ : List[Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
A__ : Optional[int] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A__ : Union[str, Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A__ : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _a (__magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Dict = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
UpperCAmelCase__: Any = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase__: Any = (
{
'''conversational''': TFBlenderbotForConditionalGeneration,
'''feature-extraction''': TFBlenderbotModel,
'''summarization''': TFBlenderbotForConditionalGeneration,
'''text2text-generation''': TFBlenderbotForConditionalGeneration,
'''translation''': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase__: Union[str, Any] = True
UpperCAmelCase__: Any = False
UpperCAmelCase__: List[str] = False
def __A ( self ):
A__ : Any = TFBlenderbotModelTester(self )
A__ : str = ConfigTester(self , config_class=A__ )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
A__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A__ )
@require_tokenizers
@require_tf
class _a (unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Union[str, Any] = ['''My friends are cool but they eat too many carbs.''']
UpperCAmelCase__: List[Any] = '''facebook/blenderbot-400M-distill'''
@cached_property
def __A ( self ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def __A ( self ):
A__ : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def __A ( self ):
A__ : Optional[Any] = self.tokenizer(self.src_text , return_tensors="""tf""" )
A__ : str = self.model.generate(
model_inputs.input_ids , )
A__ : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A__ )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
A_ : List[str] = logging.get_logger(__name__)
A_ : List[Any] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
A_ : str = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def UpperCamelCase (lowercase_: List[Any] ) -> List[Any]:
A__ : Optional[Any] = {}
with open(lowercase_ , """r""" ) as file:
for line_number, line in enumerate(lowercase_ ):
A__ : Any = line.strip()
if line:
A__ : Optional[int] = line.split()
A__ : Any = line_number
A__ : Tuple = words[0]
A__ : Union[str, Any] = value
return result
def UpperCamelCase (lowercase_: List[str] , lowercase_: Tuple , lowercase_: int , lowercase_: Optional[int] , lowercase_: Optional[int] ) -> Dict:
for attribute in key.split(""".""" ):
A__ : Optional[int] = getattr(lowercase_ , lowercase_ )
A__ : Optional[int] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowercase_ ):
A__ : Dict = PARAM_MAPPING[full_name.split(""".""" )[-1]]
A__ : str = """param"""
if weight_type is not None and weight_type != "param":
A__ : List[Any] = getattr(lowercase_ , lowercase_ ).shape
elif weight_type is not None and weight_type == "param":
A__ : Optional[int] = hf_pointer
for attribute in hf_param_name.split(""".""" ):
A__ : Optional[Any] = getattr(lowercase_ , lowercase_ )
A__ : List[Any] = shape_pointer.shape
# let's reduce dimension
A__ : str = value[0]
else:
A__ : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
A__ : Any = value
elif weight_type == "weight_g":
A__ : List[str] = value
elif weight_type == "weight_v":
A__ : Union[str, Any] = value
elif weight_type == "bias":
A__ : Optional[Any] = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
A__ : Dict = getattr(lowercase_ , lowercase_ )
A__ : Tuple = value
else:
A__ : List[str] = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
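# Editor-added sketch of the traversal used above: a dotted checkpoint key such
# as "encoder.layers.0.attention" is resolved one attribute segment at a time.
def _dotted_getattr_sketch(obj, dotted_key):
    for attribute in dotted_key.split("."):
        obj = getattr(obj, attribute)
    return obj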
def UpperCamelCase (lowercase_: Any , lowercase_: Optional[Any] , lowercase_: Tuple , lowercase_: Any , lowercase_: Union[str, Any] ) -> Tuple:
A__ : Any = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowercase_ ):
A__ : List[Any] = PARAM_MAPPING[full_name.split(""".""" )[-1]]
A__ : List[str] = """param"""
if weight_type is not None and weight_type != "param":
A__ : int = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
A__ : Union[str, Any] = """.""".join([key, hf_param_name] )
else:
A__ : Optional[Any] = key
A__ : Tuple = value if """lm_head""" in full_key else value[0]
A_ : List[Any] = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False):
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        idalabel = read_txt_into_dict(dict_path)
        config.idalabel = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)
    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, not is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
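# Illustrative sketch (hypothetical key, not part of the conversion script itself):
# how the wildcard entries in MAPPING above are resolved. The layer index is
# recovered from the fairseq parameter name and substituted for "*" in the mapped key.
def _demo_wildcard_mapping():
    name = "encoder.layers.3.self_attn.k_proj.weight"  # hypothetical fairseq key
    key = "self_attn.k_proj"
    mapped_key = "encoder.layers.*.attention.k_proj"
    layer_index = name.split(key)[0].split(".")[-2]  # -> "3"
    return mapped_key.replace("*", layer_index)  # -> "encoder.layers.3.attention.k_proj"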
| 456 | 1 |
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    """simple docstring"""
    texts = {
        'en': 'Machine learning is great, isn\'t it?',
        'ru': 'Машинное обучение - это здорово, не так ли?',
        'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        'wmt16-en-de-dist-12-1': [28.3, 27.52],
        'wmt16-en-de-dist-6-1': [27.4, 27.11],
        'wmt16-en-de-12-1': [26.9, 25.75],
    }
    pair = f'{src_lang}-{tgt_lang}'
    readme = f'''\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'''
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, 'README.md')
    print(f'Generating {path}')
    with open(path, 'w', encoding='utf-8') as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / 'allenai' / model_name
    write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
| 713 |
def interpolation_search(sorted_collection, item):
    """simple docstring"""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """simple docstring"""
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, point)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, point, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    """simple docstring"""
    if collection != sorted(collection):
        raise ValueError('Collection must be ascending sorted')
    return True
if __name__ == "__main__":
import sys
_UpperCamelCase: Tuple =0
if debug == 1:
_UpperCamelCase: int =[10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('Sequence must be ascending sorted to apply interpolation search')
_UpperCamelCase: str =67
_UpperCamelCase: List[str] =interpolation_search(collection, target)
if result is not None:
print(F"{target} found at positions: {result}")
else:
print('Not found')
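# Illustrative usage sketch (not part of the original module): expected behaviour
# of interpolation_search on a small sorted list. The probe index is
# left + (item - a[left]) * (right - left) // (a[right] - a[left]).
def _demo_interpolation_search():
    data = [2, 4, 8, 16, 32, 64]
    assert interpolation_search(data, 16) == 3
    assert interpolation_search(data, 5) is None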
| 585 | 0 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2, initializer_range=0.02, num_labels=2, num_choices=4, summary_type="last", use_proj=True, scope=None, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id)
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> List[Any]:
SCREAMING_SNAKE_CASE = XLMModel(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , lengths=a , langs=a)
SCREAMING_SNAKE_CASE = model(a , langs=a)
SCREAMING_SNAKE_CASE = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> Optional[int]:
SCREAMING_SNAKE_CASE = XLMWithLMHeadModel(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> str:
SCREAMING_SNAKE_CASE = XLMForQuestionAnsweringSimple(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a)
SCREAMING_SNAKE_CASE = model(a , start_positions=a , end_positions=a)
SCREAMING_SNAKE_CASE = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> Tuple:
SCREAMING_SNAKE_CASE = XLMForQuestionAnswering(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a)
SCREAMING_SNAKE_CASE = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , p_mask=a , )
SCREAMING_SNAKE_CASE = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , )
((SCREAMING_SNAKE_CASE) , ) = result_with_labels.to_tuple()
SCREAMING_SNAKE_CASE = model(a , start_positions=a , end_positions=a)
((SCREAMING_SNAKE_CASE) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> Dict:
SCREAMING_SNAKE_CASE = XLMForSequenceClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a)
SCREAMING_SNAKE_CASE = model(a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> int:
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = XLMForTokenClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> Optional[int]:
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = XLMForMultipleChoice(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
_lowercase : Optional[int] = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowercase : Union[str, Any] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_lowercase : List[Any] = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a) -> int:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE__ ( self , a , a , a=False) -> List[Any]:
SCREAMING_SNAKE_CASE = super()._prepare_for_class(a , a , return_labels=a)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
SCREAMING_SNAKE_CASE = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
SCREAMING_SNAKE_CASE = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*a)
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a=False , a=1) -> List[str]:
self.assertIsInstance(a , a)
self.assertListEqual(
[isinstance(a , a) for iter_attentions in attentions] , [True] * len(a))
self.assertEqual(len(a) , (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(a):
# adds PAD dummy token
SCREAMING_SNAKE_CASE = min_length + idx + 1
SCREAMING_SNAKE_CASE = min_length + idx + 1
SCREAMING_SNAKE_CASE = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(a))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a=False , a=1) -> Dict:
self.assertIsInstance(a , a)
self.assertListEqual(
[isinstance(a , a) for iter_hidden_states in hidden_states] , [True] * len(a) , )
self.assertEqual(len(a) , (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(a):
# adds PAD dummy token
SCREAMING_SNAKE_CASE = min_length + idx + 1
SCREAMING_SNAKE_CASE = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(a) , )
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = XLMModel.from_pretrained(a)
self.assertIsNotNone(a)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
model.to(a)
SCREAMING_SNAKE_CASE = torch.tensor([[14, 447]] , dtype=torch.long , device=a) # the president
SCREAMING_SNAKE_CASE = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
SCREAMING_SNAKE_CASE = model.generate(a , do_sample=a)
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , a)
| 73 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_audio_spectrogram_transformer'] = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_audio_spectrogram_transformer'] = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 541 | 0 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    """simple docstring"""
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
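# Illustrative check (assumed behaviour, not part of the original pipeline): for a
# (6, 6) tile with overlap_pixels=2, the mask keeps a 2x2 core at 255 and ramps
# linearly to 0 over the 2-pixel border, so overlapping tiles blend when pasted.
def _demo_transparency_mask():
    m = make_transparency_mask((6, 6), 2)
    assert m.shape == (6, 6) and m[3, 3] == 255 and m[0, 0] == 0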
def clamp(n, smallest, largest):
    """simple docstring"""
    return max(smallest, min(n, largest))
def clamp_rect(rect: [int], min: [int], max: [int]):
    """simple docstring"""
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )
def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    """simple docstring"""
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    """simple docstring"""
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    """simple docstring"""
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def next_divisible(n, d):
    """simple docstring"""
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        super().__init__(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level)
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders), mode="L", )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask)
    @torch.no_grad()
    def __call__(self, prompt, image, num_inference_steps=75, guidance_scale=9.0, noise_level=50, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, callback=None, callback_steps=1, tile_size=128, tile_border=32, original_image_slice=32):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice, x, y, tile_size, tile_border, image, final_image, prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, noise_level=noise_level, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
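# Illustrative sketch (not part of the pipeline itself): how the tile grid in
# __call__ covers an input image. For a 512x512 image with the default
# tile_size=128, ceil(512/128) * ceil(512/128) = 16 tiles are diffused and blended.
def _demo_tile_count(width=512, height=512, tile_size=128):
    return math.ceil(width / tile_size) * math.ceil(height / tile_size)  # -> 16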
def main():
    """simple docstring"""
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")
    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")
    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
    main()
| 403 |
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    """simple docstring"""
    offline_runners = []
    cmd = (
        f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)
    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        """simple docstring"""
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
    args = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
| 403 | 1 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect('equal')
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
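    # Sanity check (illustrative, assumes the construction above): each iteration
    # replaces every segment with four, so after n steps the curve has
    # 3 * 4**n segments, i.e. 3 * 4**n + 1 points.
    assert len(processed_vectors) == 3 * 4**5 + 1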
| 466 |
'''simple docstring'''
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
# Test 1)
    pattern = 'abc1abc12'
    text1 = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text2 = 'alskfjaldsk23adsfabcabc'
    assert kmp(pattern, text1) and not kmp(pattern, text2)
# Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert kmp(pattern, text)
    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert kmp(pattern, text)
    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert kmp(pattern, text)
    # Test 5)
    pattern = 'aabaabaaa'
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
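    # Illustrative trace (not part of the original tests): for pattern 'AAAB' the
    # failure array is [0, 1, 2, 0]; on a mismatch at pattern index 3 the search
    # resumes at failure[2] = 2 instead of restarting from 0.
    assert get_failure_array('AAAB') == [0, 1, 2, 0]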
| 466 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A : Tuple = logging.get_logger(__name__)
_A : Any = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2, adapter_layer_norm=False, adapter_reuse_layer_norm=True, ln_before_adapter=True, languages=("en_XX",), default_language=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 518 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.3144598
def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300  # K
    molar_mass = 0.028  # kg/mol for N2; the formula above expects SI units
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(F"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 518 | 1 |
'''simple docstring'''
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = '.'
if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
    non_existent_paths = []
    all_paths = []
with open(doctest_file_path) as fp:
for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
        non_existent_paths = '\n'.join(non_existent_paths)
raise ValueError(F'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
if all_paths != sorted(all_paths):
raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
| 245 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        """simple docstring"""
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[Any] = "<s>"
_lowercase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase ) , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(_UpperCamelCase ) , 1000 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _lowerCamelCase ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_lowercase : str = self.get_tokenizer()
_lowercase : List[Any] = self.get_rust_tokenizer()
_lowercase : Any = "I was born in 92000, and this is falsé."
_lowercase : Dict = tokenizer.tokenize(_UpperCamelCase )
_lowercase : List[Any] = rust_tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
_lowercase : Union[str, Any] = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
_lowercase : int = rust_tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
_lowercase : Tuple = self.get_rust_tokenizer()
_lowercase : Optional[Any] = tokenizer.encode(_UpperCamelCase )
_lowercase : Any = rust_tokenizer.encode(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( self , _UpperCamelCase=15 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowercase : int = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
# Simple input
_lowercase : int = "This is a simple input"
_lowercase : Tuple = ["This is a simple input 1", "This is a simple input 2"]
_lowercase : str = ("This is a simple input", "This is a pair")
_lowercase : int = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(_UpperCamelCase , tokenizer_r.encode , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Simple input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Simple input
self.assertRaises(
_UpperCamelCase , tokenizer_r.batch_encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" , )
# Pair input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Pair input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Pair input
self.assertRaises(
_UpperCamelCase , tokenizer_r.batch_encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" , )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Optional[Any] = ReformerTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase )
_lowercase : List[Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [285, 46, 10, 170, 382] , )
_lowercase : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_lowercase : Dict = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_lowercase : List[Any] = tokenizer.convert_ids_to_tokens(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def _lowerCamelCase ( self ):
"""simple docstring"""
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Optional[int] = "Hello World!"
_lowercase : Optional[Any] = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase ) )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_lowercase : Optional[Any] = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase ) )
@require_torch
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
_lowercase : int = list(self.big_tokenizer.get_vocab().keys() )[:10]
_lowercase : Tuple = " ".join(_UpperCamelCase )
_lowercase : Tuple = self.big_tokenizer.encode_plus(_UpperCamelCase , return_tensors="pt" )
_lowercase : int = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
_lowercase : int = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
_lowercase : Optional[int] = encoded_sequence["input_ids"].shape
_lowercase : List[Any] = ReformerModel(_UpperCamelCase )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_UpperCamelCase )
model(**_UpperCamelCase )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Tuple = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
_lowercase : Dict = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=_UpperCamelCase , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=_UpperCamelCase , sequences=_UpperCamelCase , )
| 245 | 1 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []
    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"
    @property
    def level(self) -> int:
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__(self) -> str:
        items = list(self)
        if len(items) == 0:
            return f"SkipList(level={self.level})"
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards))
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward
        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)
def __iter__(self ):
A_ : Optional[int] = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
A_ : str = node.forward[0]
def _a (self ):
A_ : Dict = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def _a (self , lowercase ):
A_ : Union[str, Any] = []
A_ : List[Any] = self.head
for i in reversed(range(self.level ) ):
            # i < node.level - When the node level is less than `i`, decrement `i`.
            # node.forward[i].key < key - Jumping to a node with a key greater than
            #                             or equal to the searched key would result
            #                             in skipping the searched key.
while i < node.level and node.forward[i].key < key:
A_ : List[Any] = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(lowercase )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def _a (self , lowercase ):
A_ : List[Any] = self._locate_node(lowercase )
if node is not None:
for i, update_node in enumerate(lowercase ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
A_ : List[Any] = node.forward[i]
else:
A_ : List[str] = update_node.forward[:i]
def _a (self , lowercase , lowercase ):
A_ : Union[str, Any] = self._locate_node(lowercase )
if node is not None:
A_ : Any = value
else:
A_ : List[str] = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , lowercase ):
update_vector.append(self.head )
A_ : List[str] = level
A_ : List[str] = Node(lowercase , lowercase )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(lowercase )
else:
A_ : str = new_node
def _a (self , lowercase ):
A_ : List[str] = self._locate_node(lowercase )
if node is not None:
return node.value
return None
def test_insert():
'''simple docstring'''
A_ : int = SkipList()
skip_list.insert("""Key1""" , 3 )
skip_list.insert("""Key2""" , 12 )
skip_list.insert("""Key3""" , 41 )
skip_list.insert("""Key4""" , -19 )
A_ : List[Any] = skip_list.head
A_ : List[str] = {}
while node.level != 0:
A_ : Tuple = node.forward[0]
A_ : List[Any] = node.value
assert len(lowerCamelCase__ ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def test_insert_overrides_existing_value():
'''simple docstring'''
A_ : int = SkipList()
skip_list.insert("""Key1""" , 10 )
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""Key5""" , 7 )
skip_list.insert("""Key7""" , 10 )
skip_list.insert("""Key10""" , 5 )
skip_list.insert("""Key7""" , 7 )
skip_list.insert("""Key5""" , 5 )
skip_list.insert("""Key10""" , 10 )
A_ : Dict = skip_list.head
A_ : Tuple = {}
while node.level != 0:
A_ : Tuple = node.forward[0]
A_ : Optional[Any] = node.value
if len(lowerCamelCase__ ) != 4:
print()
assert len(lowerCamelCase__ ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none():
'''simple docstring'''
A_ : Any = SkipList()
assert skip_list.find("""Some key""" ) is None
def test_search():
'''simple docstring'''
A_ : Tuple = SkipList()
skip_list.insert("""Key2""" , 20 )
assert skip_list.find("""Key2""" ) == 20
skip_list.insert("""Some Key""" , 10 )
skip_list.insert("""Key2""" , 8 )
skip_list.insert("""V""" , 13 )
assert skip_list.find("""Y""" ) is None
assert skip_list.find("""Key2""" ) == 8
assert skip_list.find("""Some Key""" ) == 10
assert skip_list.find("""V""" ) == 13
def test_deleting_item_from_empty_list_do_nothing():
'''simple docstring'''
A_ : Optional[int] = SkipList()
skip_list.delete("""Some key""" )
assert len(skip_list.head.forward ) == 0
def test_deleted_items_are_not_founded_by_find_method():
'''simple docstring'''
A_ : List[Any] = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 14 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""V""" )
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""Key2""" ) is None
def test_delete_removes_only_given_key():
'''simple docstring'''
A_ : List[str] = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 14 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""V""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) == 14
assert skip_list.find("""Key1""" ) == 12
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""X""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) == 12
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""Key1""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) is None
def test_delete_doesnt_leave_dead_nodes():
'''simple docstring'''
A_ : Any = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 1_42 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""X""" )
    def traverse_keys(node ):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def test_iter_always_yields_sorted_values():
'''simple docstring'''
    def is_sorted(lst ):
        return all(next_item >= item for item, next_item in zip(lst , lst[1:] ) )
A_ : List[str] = SkipList()
for i in range(10 ):
skip_list.insert(lowerCamelCase__ , lowerCamelCase__ )
assert is_sorted(list(lowerCamelCase__ ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(lowerCamelCase__ ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(lowerCamelCase__ ) )
def pytests():
'''simple docstring'''
for _ in range(1_00 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def main():
'''simple docstring'''
A_ : List[Any] = SkipList()
skip_list.insert(2 , """2""" )
skip_list.insert(4 , """4""" )
skip_list.insert(6 , """4""" )
skip_list.insert(4 , """5""" )
skip_list.insert(8 , """4""" )
skip_list.insert(9 , """4""" )
skip_list.delete(4 )
print(lowerCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
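# A minimal, de-obfuscated sketch of the level-generation rule used by SkipList
# above (local names here are illustrative, since the original identifiers are
# mangled): a node reaches level k + 1 with probability p**k, a geometric
# distribution, which is what keeps the expected search cost at O(log n).
from collections import Counter
def demo_random_level(p: float = 0.5, max_level: int = 16) -> int:
    level = 1
    while random() < p and level < max_level:
        level += 1
    return level
# Roughly half of the sampled nodes land on level 1, a quarter on level 2, and so on.
print(sorted(Counter(demo_random_level() for _ in range(10_000)).items()))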
| 707 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCamelCase :Any = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[Any] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[Any] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    lowerCamelCase :Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 686 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case : Any = logging.get_logger(__name__)
__snake_case : Optional[int] = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class lowerCamelCase ( BackboneConfigMixin , PretrainedConfig ):
'''simple docstring'''
__snake_case = """nat"""
__snake_case = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : Optional[Any] , lowerCAmelCase_ : Tuple=4 , lowerCAmelCase_ : Optional[Any]=3 , lowerCAmelCase_ : str=64 , lowerCAmelCase_ : List[Any]=[3, 4, 6, 5] , lowerCAmelCase_ : List[str]=[2, 4, 8, 16] , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : List[Any]=3.0 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : int=0.0 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : str="gelu" , lowerCAmelCase_ : Optional[Any]=0.02 , lowerCAmelCase_ : List[Any]=1e-5 , lowerCAmelCase_ : int=0.0 , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Any=None , **lowerCAmelCase_ : int , ) -> List[Any]:
'''simple docstring'''
super().__init__(**__lowercase )
A__ : Dict =patch_size
A__ : Dict =num_channels
A__ : Any =embed_dim
A__ : List[Any] =depths
A__ : str =len(__lowercase )
A__ : Union[str, Any] =num_heads
A__ : Optional[int] =kernel_size
A__ : Optional[Any] =mlp_ratio
A__ : List[str] =qkv_bias
A__ : List[str] =hidden_dropout_prob
A__ : Optional[int] =attention_probs_dropout_prob
A__ : Dict =drop_path_rate
A__ : str =hidden_act
A__ : Optional[Any] =layer_norm_eps
A__ : Optional[Any] =initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A__ : Optional[Any] =int(embed_dim * 2 ** (len(__lowercase ) - 1) )
A__ : Union[str, Any] =layer_scale_init_value
A__ : Dict =['''stem'''] + [f"stage{idx}" for idx in range(1 , len(__lowercase ) + 1 )]
A__ : List[Any] =get_aligned_output_features_output_indices(
out_features=__lowercase , out_indices=__lowercase , stage_names=self.stage_names )
| 215 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class SCREAMING_SNAKE_CASE_ ( PipelineTool ):
"""simple docstring"""
__snake_case : int = """philschmid/bart-large-cnn-samsum"""
__snake_case : Optional[int] = (
"""This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
"""and returns a summary of the text."""
)
__snake_case : Dict = """summarizer"""
__snake_case : str = AutoTokenizer
    __snake_case : List[str] = AutoModelForSeq2SeqLM
__snake_case : List[Any] = ["""text"""]
__snake_case : Dict = ["""text"""]
def __lowercase ( self :int , __lowercase :List[Any] ):
return self.pre_processor(__lowercase , return_tensors='''pt''' , truncation=__lowercase )
def __lowercase ( self :Optional[int] , __lowercase :Optional[int] ):
return self.model.generate(**__lowercase )[0]
def __lowercase ( self :List[Any] , __lowercase :str ):
return self.pre_processor.decode(__lowercase , skip_special_tokens=__lowercase , clean_up_tokenization_spaces=__lowercase )
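# A hedged usage sketch for the tool above: the same checkpoint can also be
# exercised through the high-level `pipeline` API. The model id comes from the
# class attributes above; the exact agent-side tool invocation may differ.
from transformers import pipeline
summarizer = pipeline("summarization", model="philschmid/bart-large-cnn-samsum")
dialogue = "Anna: Are we meeting at 5?\nBen: Yes, see you at the station."
print(summarizer(dialogue, max_length=30, min_length=5)[0]["summary_text"])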
| 179 | 0 |
def __UpperCamelCase ( input_num) ->int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
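# A quick worked example of the rule above (shown under the assumed name
# `sum_of_divisors`, since the original identifier is obfuscated): the proper
# divisors of 28 are 1, 2, 4, 7 and 14, which sum back to 28, a perfect number.
def sum_of_divisors(n: int) -> int:
    return sum(d for d in range(1, n // 2 + 1) if n % d == 0)
assert sum_of_divisors(28) == 28  # perfect number
assert sum_of_divisors(12) == 16  # 1 + 2 + 3 + 4 + 6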
| 360 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
A_ = get_logger(__name__)
A_ = r"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
@add_start_docstrings(_lowerCAmelCase )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase ):
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
@add_start_docstrings(_lowerCAmelCase )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase ):
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
@add_start_docstrings(_lowerCAmelCase )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ):
for processor in self:
lowerCamelCase__ = inspect.signature(processor.__call__ ).parameters
if len(_lowerCAmelCase ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F"Make sure that all the required parameters: {list(function_args.keys() )} for "
F"{processor.__class__} are passed to the logits processor." )
lowerCamelCase__ = processor(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
else:
lowerCamelCase__ = processor(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or not (temperature > 0):
raise ValueError(F"`temperature` has to be a strictly positive float, but is {temperature}" )
lowerCamelCase__ = temperature
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = scores / self.temperature
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase , _lowerCAmelCase = -float("Inf" ) , _lowerCAmelCase = 1 ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F"`top_p` has to be a float > 0 and < 1, but is {top_p}" )
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or (min_tokens_to_keep < 1):
raise ValueError(F"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}" )
lowerCamelCase__ = top_p
lowerCamelCase__ = filter_value
lowerCamelCase__ = min_tokens_to_keep
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ , lowerCamelCase__ = lax.top_k(_lowerCAmelCase , scores.shape[-1] )
lowerCamelCase__ = jnp.full_like(_lowerCAmelCase , self.filter_value )
lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase , axis=-1 ).cumsum(axis=-1 )
lowerCamelCase__ = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
lowerCamelCase__ = jnp.roll(_lowerCAmelCase , 1 )
score_mask |= score_mask.at[:, 0].set(_lowerCAmelCase )
# min tokens to keep
lowerCamelCase__ = score_mask.at[:, : self.min_tokens_to_keep].set(_lowerCAmelCase )
lowerCamelCase__ = jnp.where(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ = jax.lax.sort_key_val(_lowerCAmelCase , _lowerCAmelCase )[-1]
return next_scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase , _lowerCAmelCase = -float("Inf" ) , _lowerCAmelCase = 1 ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or top_k <= 0:
raise ValueError(F"`top_k` has to be a strictly positive integer, but is {top_k}" )
lowerCamelCase__ = max(_lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ = filter_value
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ , lowerCamelCase__ = scores.shape
lowerCamelCase__ = jnp.full(batch_size * vocab_size , self.filter_value )
lowerCamelCase__ = min(self.top_k , scores.shape[-1] ) # Safety check
lowerCamelCase__ , lowerCamelCase__ = lax.top_k(_lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ = jnp.broadcast_to((jnp.arange(_lowerCAmelCase ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
lowerCamelCase__ = topk_scores.flatten()
lowerCamelCase__ = topk_indices.flatten() + shift
lowerCamelCase__ = next_scores_flat.at[topk_indices_flat].set(_lowerCAmelCase )
lowerCamelCase__ = next_scores_flat.reshape(_lowerCAmelCase , _lowerCAmelCase )
return next_scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase ):
lowerCamelCase__ = bos_token_id
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = jnp.full(scores.shape , -float("inf" ) )
lowerCamelCase__ = 1 - jnp.bool_(cur_len - 1 )
lowerCamelCase__ = jnp.where(_lowerCAmelCase , new_scores.at[:, self.bos_token_id].set(0 ) , _lowerCAmelCase )
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = max_length
lowerCamelCase__ = eos_token_id
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = jnp.full(scores.shape , -float("inf" ) )
lowerCamelCase__ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
lowerCamelCase__ = jnp.where(_lowerCAmelCase , new_scores.at[:, self.eos_token_id].set(0 ) , _lowerCAmelCase )
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or min_length < 0:
raise ValueError(F"`min_length` has to be a positive integer, but is {min_length}" )
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or eos_token_id < 0:
raise ValueError(F"`eos_token_id` has to be a positive integer, but is {eos_token_id}" )
lowerCamelCase__ = min_length
lowerCamelCase__ = eos_token_id
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
# create boolean flag to decide if min length penalty should be applied
lowerCamelCase__ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
lowerCamelCase__ = jnp.where(_lowerCAmelCase , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , _lowerCAmelCase )
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = list(_lowerCAmelCase )
lowerCamelCase__ = begin_index
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = 1 - jnp.bool_(cur_len - self.begin_index )
lowerCamelCase__ = jnp.where(_lowerCAmelCase , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , _lowerCAmelCase )
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase ):
lowerCamelCase__ = list(_lowerCAmelCase )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase ):
lowerCamelCase__ = dict(_lowerCAmelCase )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
        lowerCamelCase__ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
for index, token in force_token_map.items():
if token is not None:
lowerCamelCase__ = force_token_array.at[index].set(_lowerCAmelCase )
        lowerCamelCase__ = jnp.int32(_lowerCAmelCase )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
def _force_token(_lowerCAmelCase ):
lowerCamelCase__ = scores.shape[0]
lowerCamelCase__ = self.force_token_array[generation_idx]
lowerCamelCase__ = jnp.ones_like(_lowerCAmelCase , dtype=scores.dtype ) * -float("inf" )
lowerCamelCase__ = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
lowerCamelCase__ = lax.dynamic_update_slice(_lowerCAmelCase , _lowerCAmelCase , (0, current_token) )
return new_scores
lowerCamelCase__ = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(_lowerCAmelCase ) , lambda: scores , ) , )
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = generate_config.eos_token_id
lowerCamelCase__ = generate_config.no_timestamps_token_id
lowerCamelCase__ = generate_config.no_timestamps_token_id + 1
lowerCamelCase__ = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(_lowerCAmelCase , "max_initial_timestamp_index" ):
lowerCamelCase__ = generate_config.max_initial_timestamp_index
else:
lowerCamelCase__ = model_config.vocab_size
if self.max_initial_timestamp_index is None:
lowerCamelCase__ = model_config.vocab_size
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
# suppress <|notimestamps|> which is handled by without_timestamps
lowerCamelCase__ = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(_lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = jnp.where((cur_len - self.begin_index) >= 1 , _lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _lowerCAmelCase , )
lowerCamelCase__ = jnp.where((cur_len - self.begin_index) < 2 , _lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , _lowerCAmelCase , _lowerCAmelCase , )
return jnp.where(
_lowerCAmelCase , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , _lowerCAmelCase , )
lowerCamelCase__ = jax.vmap(_lowerCAmelCase )(_lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ = jnp.where(cur_len == self.begin_index , _lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _lowerCAmelCase , )
lowerCamelCase__ = self.timestamp_begin + self.max_initial_timestamp_index
lowerCamelCase__ = jnp.where(
_lowerCAmelCase , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , _lowerCAmelCase , )
# if sum of probability over timestamps is above any other token, sample timestamp
lowerCamelCase__ = jax.nn.log_softmax(_lowerCAmelCase , axis=-1 )
def handle_cumulative_probs(_lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
lowerCamelCase__ = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , _lowerCAmelCase , )
lowerCamelCase__ = jax.vmap(_lowerCAmelCase )(_lowerCAmelCase , _lowerCAmelCase )
return scores
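# A small self-contained sketch of the top-k warping idea implemented above,
# assuming only `jax.numpy` and `lax.top_k`: every logit outside the k best per
# row is replaced by -inf so sampling can never pick it. This mirrors the intent
# of the top-k class above without its XLA-friendly flatten-and-scatter bookkeeping.
import jax.numpy as jnp
from jax import lax
def demo_top_k_filter(scores, k):
    topk_scores, _ = lax.top_k(scores, k)
    cutoff = topk_scores[:, -1][:, None]  # smallest logit that is still kept
    return jnp.where(scores < cutoff, -jnp.inf, scores)
print(demo_top_k_filter(jnp.array([[1.0, 3.0, 2.0, 0.5]]), k=2))  # keeps 3.0 and 2.0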
| 360 | 1 |
import os
# Precomputes a list of the 100 first triangular numbers
__A = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)]
def solution() -> int:
    """simple docstring"""
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    words_file_path = os.path.join(script_dir , 'words.txt' )
    words = ''
    with open(words_file_path ) as f:
        words = f.readline()
    words = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
    words = [
        word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words )
if __name__ == "__main__":
print(solution())
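# A worked instance of the word-value rule above: each letter maps to its
# alphabet position via ord(ch) - 64, and a word is "triangular" when the sum is
# some t(n) = n * (n + 1) / 2. For example "SKY" gives 19 + 11 + 25 = 55 = t(10).
assert sum(ord(ch) - 64 for ch in "SKY") == 55 == 10 * 11 // 2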
| 469 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
__lowerCamelCase = get_activation('gelu' )
self.assertTrue(torch.allclose(gelu_python(lowerCamelCase__ ) , torch_builtin(lowerCamelCase__ ) ) )
self.assertFalse(torch.allclose(gelu_python(lowerCamelCase__ ) , gelu_new(lowerCamelCase__ ) ) )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
__lowerCamelCase = get_activation('gelu' )
__lowerCamelCase = get_activation('gelu_10' )
__lowerCamelCase = torch_builtin(lowerCamelCase__ )
__lowerCamelCase = geluaa(lowerCamelCase__ )
__lowerCamelCase = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(lowerCamelCase__ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
with self.assertRaises(lowerCamelCase__ ):
get_activation('bogus' )
with self.assertRaises(lowerCamelCase__ ):
get_activation(lowerCamelCase__ )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = get_activation('gelu' )
__lowerCamelCase = 1
__lowerCamelCase = get_activation('gelu' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(lowerCamelCase__ ):
__lowerCamelCase = acta.a
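# For reference, the two GELU variants exercised above differ only in how they
# approximate the Gaussian CDF: the exact form uses erf, while the "gelu_new"
# style uses a tanh approximation. A minimal sketch with plain torch, assuming
# nothing beyond the standard formulas:
import math
import torch
x = torch.linspace(-3, 3, 7)
gelu_exact = 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0)))
gelu_tanh = 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))
assert torch.allclose(gelu_exact, gelu_tanh, atol=1e-2)  # close, but not identical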
| 469 | 1 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowercase_ = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowercase_ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowercase_ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate( item : str , main_target : str ):
    '''simple docstring'''
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
def crossover( parent_a : str , parent_b : str ):
    '''simple docstring'''
    random_slice = random.randint(0 , len(parent_a ) - 1 )
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
def mutate( child : str , genes : list[str] ):
    '''simple docstring'''
    child_list = list(child )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0 , len(child ) - 1 )] = random.choice(genes )
    return "".join(child_list )
def select( parent_a : tuple[str, float] , population_score : list[tuple[str, float]] , genes : list[str] , ):
    '''simple docstring'''
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 1_0_0 ) + 1
    child_n = 1_0 if child_n >= 1_0 else child_n
    for _ in range(child_n ):
        parent_b = population_score[random.randint(0 , N_SELECTED )][0]
        child_a , child_b = crossover(parent_a[0] , parent_b )
        # Append new string to the population list.
        pop.append(mutate(child_a , genes ) )
        pop.append(mutate(child_b , genes ) )
    return pop
def basic( target : str , genes : list[str] , debug : bool = True ):
'''simple docstring'''
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
__snake_case : List[str] = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(__SCREAMING_SNAKE_CASE )
# Verify that the target contains no genes besides the ones inside genes variable.
__snake_case : Optional[int] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
__snake_case : str = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(__SCREAMING_SNAKE_CASE )
# Generate random starting population.
__snake_case : Optional[Any] = []
for _ in range(__SCREAMING_SNAKE_CASE ):
population.append("""""".join([random.choice(__SCREAMING_SNAKE_CASE ) for i in range(len(__SCREAMING_SNAKE_CASE ) )] ) )
# Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(__SCREAMING_SNAKE_CASE )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
__snake_case : Optional[int] = [evaluate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for item in population]
# Check if there is a matching evolution.
__snake_case : Dict = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : x[1] , reverse=__SCREAMING_SNAKE_CASE )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 1_0 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
__snake_case : Any = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(__SCREAMING_SNAKE_CASE )
# Normalize population score to be between 0 and 1.
__snake_case : Union[str, Any] = [
(item, score / len(__SCREAMING_SNAKE_CASE )) for item, score in population_score
]
# This is selection
for i in range(__SCREAMING_SNAKE_CASE ):
population.extend(select(population_score[int(__SCREAMING_SNAKE_CASE )] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(__SCREAMING_SNAKE_CASE ) > N_POPULATION:
break
if __name__ == "__main__":
lowercase_ = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowercase_ = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowercase_ , lowercase_ , lowercase_ = basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
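# A quick property check of the two evolution primitives above: crossover only
# rearranges genes between the two parents (it never invents a new one), and
# mutate changes at most one position, so string length is always preserved.
child_a , child_b = crossover("AAAA" , "BBBB" )
assert sorted(child_a + child_b ) == sorted("AAAABBBB" )
assert len(mutate("AAAA" , ["A", "B"] ) ) == 4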
| 390 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 390 | 1 |
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset( dataset , expected_features ):
    '''simple docstring'''
    assert isinstance(dataset , Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory" , [False, True])
def A__ ( A : str , A : Tuple , A : Dict , A : Union[str, Any]):
'''simple docstring'''
UpperCamelCase : str = tmp_path / "cache"
UpperCamelCase : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase : Optional[Any] = SqlDatasetReader(
"dataset" , "sqlite:///" + sqlite_path , cache_dir=A , keep_in_memory=A).read()
_check_sql_dataset(A , A)
@require_sqlalchemy
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def A__ ( A : Optional[Any] , A : int , A : Dict , A : List[Any]):
'''simple docstring'''
UpperCamelCase : Optional[int] = tmp_path / "cache"
UpperCamelCase : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCamelCase : Tuple = features.copy() if features else default_expected_features
UpperCamelCase : List[Any] = (
Features({feature: Value(A) for feature, dtype in features.items()}) if features is not None else None
)
UpperCamelCase : List[str] = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , features=A , cache_dir=A).read()
_check_sql_dataset(A , A)
def iter_sql_file( sqlite_path):
    '''simple docstring'''
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
cur.execute("SELECT * FROM dataset")
for row in cur:
yield row
@require_sqlalchemy
def A__ ( A : int , A : List[str] , A : Optional[Any]):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = tmp_path / "cache"
UpperCamelCase : List[str] = os.path.join(A , "tmp.sql")
UpperCamelCase : List[Any] = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=A).read()
SqlDatasetWriter(A , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=1).write()
UpperCamelCase : List[str] = iter_sql_file(A)
UpperCamelCase : Tuple = iter_sql_file(A)
for rowa, rowa in zip(A , A):
assert rowa == rowa
@require_sqlalchemy
def A__ ( A : Tuple , A : Dict , A : Optional[int]):
'''simple docstring'''
UpperCamelCase : Tuple = tmp_path / "cache"
UpperCamelCase : str = os.path.join(A , "tmp.sql")
UpperCamelCase : Optional[Any] = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=A).read()
SqlDatasetWriter(A , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=2).write()
UpperCamelCase : Any = iter_sql_file(A)
UpperCamelCase : str = iter_sql_file(A)
for rowa, rowa in zip(A , A):
assert rowa == rowa
@require_sqlalchemy
def A__ ( A : Any , A : List[str] , A : str):
'''simple docstring'''
UpperCamelCase : List[Any] = tmp_path / "cache"
UpperCamelCase : Tuple = os.path.join(A , "tmp.sql")
UpperCamelCase : Tuple = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=A).read()
with pytest.raises(A):
SqlDatasetWriter(A , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=0).write()
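# A stdlib-only sketch of the round trip the tests above verify: write rows into
# a sqlite table, read them back, and check that nothing changed. The table name
# "dataset" matches the one used throughout the tests.
rows = [("a", 1, 1.0), ("b", 2, 2.0)]
con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)")
con.executemany("INSERT INTO dataset VALUES (?, ?, ?)", rows)
assert list(con.execute("SELECT * FROM dataset")) == rows
con.close()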
| 173 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ = {
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 173 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
A_ : Union[str, Any] = 'timm_backbone'
def __init__(self : Optional[Any] , a__ : List[str]=None , a__ : Optional[int]=3 , a__ : Optional[Any]=True , a__ : Optional[int]=True , a__ : List[str]=None , **a__ : Tuple , ):
"""simple docstring"""
super().__init__(**a__ )
__snake_case = backbone
__snake_case = num_channels
__snake_case = features_only
__snake_case = use_pretrained_backbone
__snake_case = True
__snake_case = out_indices if out_indices is not None else (-1,)
| 388 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint( checkpoint , config ):
__snake_case = checkpoint
__snake_case = {}
__snake_case = vae_state_dict['''encoder.conv_in.weight''']
__snake_case = vae_state_dict['''encoder.conv_in.bias''']
__snake_case = vae_state_dict['''encoder.conv_out.weight''']
__snake_case = vae_state_dict['''encoder.conv_out.bias''']
__snake_case = vae_state_dict['''encoder.norm_out.weight''']
__snake_case = vae_state_dict['''encoder.norm_out.bias''']
__snake_case = vae_state_dict['''decoder.conv_in.weight''']
__snake_case = vae_state_dict['''decoder.conv_in.bias''']
__snake_case = vae_state_dict['''decoder.conv_out.weight''']
__snake_case = vae_state_dict['''decoder.conv_out.bias''']
__snake_case = vae_state_dict['''decoder.norm_out.weight''']
__snake_case = vae_state_dict['''decoder.norm_out.bias''']
__snake_case = vae_state_dict['''quant_conv.weight''']
__snake_case = vae_state_dict['''quant_conv.bias''']
__snake_case = vae_state_dict['''post_quant_conv.weight''']
__snake_case = vae_state_dict['''post_quant_conv.bias''']
# Retrieves the keys for the encoder down blocks only
__snake_case = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
__snake_case = {
layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(snake_case_ )
}
# Retrieves the keys for the decoder up blocks only
__snake_case = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
__snake_case = {
layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(snake_case_ )
}
for i in range(snake_case_ ):
__snake_case = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
__snake_case = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.weight""" )
__snake_case = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.bias""" )
__snake_case = renew_vae_resnet_paths(snake_case_ )
__snake_case = {'''old''': f"""down.{i}.block""", '''new''': f"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
__snake_case = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
__snake_case = 2
for i in range(1 , num_mid_res_blocks + 1 ):
__snake_case = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
__snake_case = renew_vae_resnet_paths(snake_case_ )
__snake_case = {'''old''': f"""mid.block_{i}""", '''new''': f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
__snake_case = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
__snake_case = renew_vae_attention_paths(snake_case_ )
__snake_case = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
conv_attn_to_linear(snake_case_ )
for i in range(snake_case_ ):
__snake_case = num_up_blocks - 1 - i
__snake_case = [
key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
]
if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
__snake_case = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.weight"""
]
__snake_case = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.bias"""
]
__snake_case = renew_vae_resnet_paths(snake_case_ )
__snake_case = {'''old''': f"""up.{block_id}.block""", '''new''': f"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
__snake_case = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
__snake_case = 2
for i in range(1 , num_mid_res_blocks + 1 ):
__snake_case = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
__snake_case = renew_vae_resnet_paths(snake_case_ )
__snake_case = {'''old''': f"""mid.block_{i}""", '''new''': f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
__snake_case = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
__snake_case = renew_vae_attention_paths(snake_case_ )
__snake_case = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
conv_attn_to_linear(snake_case_ )
return new_checkpoint
def vae_pt_to_vae_diffuser( checkpoint_path : str , output_path : str , ):
# Only support V1
__snake_case = requests.get(
''' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' )
__snake_case = io.BytesIO(r.content )
__snake_case = OmegaConf.load(snake_case_ )
__snake_case = 512
__snake_case = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if checkpoint_path.endswith('''safetensors''' ):
from safetensors import safe_open
__snake_case = {}
with safe_open(snake_case_ , framework='''pt''' , device='''cpu''' ) as f:
for key in f.keys():
__snake_case = f.get_tensor(snake_case_ )
else:
__snake_case = torch.load(snake_case_ , map_location=snake_case_ )['''state_dict''']
# Convert the VAE model.
__snake_case = create_vae_diffusers_config(snake_case_ , image_size=snake_case_ )
__snake_case = custom_convert_ldm_vae_checkpoint(snake_case_ , snake_case_ )
__snake_case = AutoencoderKL(**snake_case_ )
vae.load_state_dict(snake_case_ )
    vae.save_pretrained(output_path )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
snake_case_ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
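# At its core the conversion above is a systematic renaming of state-dict keys
# from the LDM layout to the diffusers layout. A toy sketch of that idea, with
# made-up key names purely for illustration:
old_state = {"encoder.down.0.block.0.weight": torch.zeros(3)}
rename = {"encoder.down.0.block": "encoder.down_blocks.0.resnets"}
new_state = {}
for key, tensor in old_state.items():
    for old, new in rename.items():
        key = key.replace(old, new)
    new_state[key] = tensor
assert "encoder.down_blocks.0.resnets.0.weight" in new_state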
| 388 | 1 |
"""simple docstring"""
def interpolation_search( sorted_collection , item ):
    '''simple docstring'''
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection ):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
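# A worked instance of the probe formula above: for the uniformly spaced
# collection [10, 20, 30, 40, 50], item 40 yields point = 0 + (40 - 10) * 4 //
# (50 - 10) = 3, so the probe lands on the answer in a single step. Uniformly
# distributed data is the best case interpolation search is designed around.
assert interpolation_search([10, 20, 30, 40, 50] , 40 ) == 3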
def interpolation_search_by_recursion( sorted_collection , item , left , right ):
'''simple docstring'''
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection ):
        return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(A , A , A , A )
elif point > right:
return interpolation_search_by_recursion(A , A , A , A )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
A , A , A , point - 1 )
else:
return interpolation_search_by_recursion(
A , A , point + 1 , A )
def __assert_sorted( collection ):
    '''simple docstring'''
    if collection != sorted(collection ):
raise ValueError("Collection must be ascending sorted" )
return True
if __name__ == "__main__":
import sys
__UpperCAmelCase =0
if debug == 1:
__UpperCAmelCase =[10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("""Sequence must be ascending sorted to apply interpolation search""")
__UpperCAmelCase =67
__UpperCAmelCase =interpolation_search(collection, target)
if result is not None:
print(F'''{target} found at positions: {result}''')
else:
print("""Not found""") | 337 |
"""simple docstring"""
import math
def _lowerCAmelCase ( num ):
    '''simple docstring'''
    return math.sqrt(num ) * math.sqrt(num ) == num
def _lowerCAmelCase ( n ):
    '''simple docstring'''
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
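# The float-based check in the first helper above can misfire for very large
# inputs, so an exact alternative worth knowing is math.isqrt (Python 3.8+),
# which stays entirely in integer arithmetic:
def is_perfect_square(n: int) -> bool:
    return n >= 0 and math.isqrt(n) ** 2 == n
assert is_perfect_square(27**2)
assert not is_perfect_square(27**2 + 1)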
| 673 | 0 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class __SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase ):
snake_case : Optional[int] = BarthezTokenizer
snake_case : Dict = BarthezTokenizerFast
snake_case : Optional[Any] = True
snake_case : Dict = True
def _lowerCamelCase ( self ):
super().setUp()
UpperCamelCase__ = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=A_ )
UpperCamelCase__ = tokenizer
def _lowerCamelCase ( self ):
UpperCamelCase__ = """<pad>"""
UpperCamelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def _lowerCamelCase ( self ):
UpperCamelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(A_ ) , 101122 )
def _lowerCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def _lowerCamelCase ( self ):
UpperCamelCase__ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
UpperCamelCase__ = [0, 57, 3018, 70307, 91, 2]
UpperCamelCase__ = self.tokenizer(
A_ , max_length=len(A_ ) , padding=A_ , truncation=A_ , return_tensors="""pt""" )
self.assertIsInstance(A_ , A_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
UpperCamelCase__ = batch.input_ids.tolist()[0]
self.assertListEqual(A_ , A_ )
def _lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_rust_tokenizer()
UpperCamelCase__ = """I was born in 92000, and this is falsé."""
UpperCamelCase__ = tokenizer.tokenize(A_ )
UpperCamelCase__ = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase__ = tokenizer.encode(A_ , add_special_tokens=A_ )
UpperCamelCase__ = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase__ = self.get_rust_tokenizer()
UpperCamelCase__ = tokenizer.encode(A_ )
UpperCamelCase__ = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
@slow
    def _lowerCamelCase ( self ):
        # fmt: off
UpperCamelCase__ = {"""input_ids""": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
UpperCamelCase__ = [
"""Le transformeur est un modèle d\'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=A_ , )
| 703 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
snake_case : Optional[Any] = """data2vec-audio"""
def __init__( self , __lowerCAmelCase=32 , __lowerCAmelCase=768 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=3072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-5 , __lowerCAmelCase="gelu" , __lowerCAmelCase=(512, 512, 512, 512, 512, 512, 512) , __lowerCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase=(10, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase=False , __lowerCAmelCase=16 , __lowerCAmelCase=19 , __lowerCAmelCase=5 , __lowerCAmelCase=0.05 , __lowerCAmelCase=10 , __lowerCAmelCase=2 , __lowerCAmelCase=0.0 , __lowerCAmelCase=10 , __lowerCAmelCase=0 , __lowerCAmelCase="sum" , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=256 , __lowerCAmelCase=(512, 512, 512, 512, 1500) , __lowerCAmelCase=(5, 3, 3, 1, 1) , __lowerCAmelCase=(1, 2, 3, 1, 1) , __lowerCAmelCase=512 , __lowerCAmelCase=0 , __lowerCAmelCase=1 , __lowerCAmelCase=2 , __lowerCAmelCase=False , __lowerCAmelCase=3 , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase=None , **__lowerCAmelCase , ):
super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = feat_extract_activation
UpperCamelCase__ = list(__lowerCAmelCase )
UpperCamelCase__ = list(__lowerCAmelCase )
UpperCamelCase__ = list(__lowerCAmelCase )
UpperCamelCase__ = conv_bias
UpperCamelCase__ = num_conv_pos_embeddings
UpperCamelCase__ = num_conv_pos_embedding_groups
UpperCamelCase__ = conv_pos_kernel_size
UpperCamelCase__ = len(self.conv_dim )
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_dropout
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = activation_dropout
UpperCamelCase__ = feat_proj_dropout
UpperCamelCase__ = final_dropout
UpperCamelCase__ = layerdrop
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = initializer_range
UpperCamelCase__ = vocab_size
UpperCamelCase__ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase__ = mask_time_prob
UpperCamelCase__ = mask_time_length
UpperCamelCase__ = mask_time_min_masks
UpperCamelCase__ = mask_feature_prob
UpperCamelCase__ = mask_feature_length
UpperCamelCase__ = mask_feature_min_masks
# ctc loss
UpperCamelCase__ = ctc_loss_reduction
UpperCamelCase__ = ctc_zero_infinity
# adapter
UpperCamelCase__ = add_adapter
UpperCamelCase__ = adapter_kernel_size
UpperCamelCase__ = adapter_stride
UpperCamelCase__ = num_adapter_layers
UpperCamelCase__ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCamelCase__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCamelCase__ = list(__lowerCAmelCase )
UpperCamelCase__ = list(__lowerCAmelCase )
UpperCamelCase__ = list(__lowerCAmelCase )
UpperCamelCase__ = xvector_output_dim
@property
def _lowerCamelCase ( self ):
return math.prod(self.conv_stride )
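

# A minimal usage sketch (not part of the original module): instantiating the config with
# its defaults and reading back the derived feature-extractor properties. The printed
# values follow directly from the default conv_dim/conv_stride tuples above.
if __name__ == "__main__":
    config = Data2VecAudioConfig()
    print(config.num_feat_extract_layers)  # 7 convolutional feature-extractor layers
    print(config.inputs_to_logits_ratio)   # 320 raw samples consumed per logit frame (5 * 2**6)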
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
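

# A minimal usage sketch (not part of the original module): running the processor on a
# PIL image. The blank image below is a stand-in for a real photo; with the defaults the
# pipeline resizes the shortest edge to 224, center-crops to 224x224, rescales and
# normalizes with the OpenAI CLIP statistics.
if __name__ == "__main__":
    from PIL import Image

    processor = CLIPImageProcessor()
    image = Image.new("RGB", (640, 480))
    batch = processor(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)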
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
        T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
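

# A minimal usage sketch (not part of the original re-export module) for two of the
# helpers gathered above: `send_to_device` recursively moves nested tensors, and
# `patch_environment` temporarily sets environment variables. Illustrative only.
if __name__ == "__main__":
    import torch

    batch = {"input_ids": torch.ones(2, 8, dtype=torch.long)}
    batch = send_to_device(batch, "cpu")  # works on dicts/lists/tuples of tensors

    with patch_environment(CUDA_VISIBLE_DEVICES="0"):
        pass  # the variable is set only inside this block, then restored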
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify a different beta so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify a different beta so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
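

# A minimal usage sketch (not part of the original test file): the PRK/PLMS two-phase
# sampling loop that the tests above exercise, run against a trivial constant "model".
# The constant epsilon prediction is illustrative only, a stand-in for a real denoiser.
if __name__ == "__main__":
    scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
    scheduler.set_timesteps(10)
    sample = torch.ones(1, 3, 8, 8)
    for t in scheduler.prk_timesteps:  # Runge-Kutta warm-up steps
        residual = 0.1 * torch.ones_like(sample)
        sample = scheduler.step_prk(residual, t, sample).prev_sample
    for t in scheduler.plms_timesteps:  # linear multi-step phase
        residual = 0.1 * torch.ones_like(sample)
        sample = scheduler.step_plms(residual, t, sample).prev_sample
    print(sample.shape)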
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling uniform points in a square around the unit circle."""

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The value of math.pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
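
    # An illustrative run (not in the original module): Monte Carlo estimates tighten
    # roughly like 1/sqrt(n), so larger iteration counts give smaller reported errors.
    pi_estimator(100_000)
    area_under_line_estimator_check(10_000)
    pi_estimator_using_area_under_curve(10_000)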
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
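

# A small usage sketch (not part of the original module) of the SQuAD-style metric
# helpers above; the sample strings are illustrative only.
if __name__ == "__main__":
    print(normalize_answer("The  Quick, Brown Fox!"))  # -> "quick brown fox"
    print(f1_score("the quick brown fox", "a quick brown dog"))  # token-level F1 ~ 0.667
    print(calculate_exact_match(["hello world"], ["Hello, world"]))  # {"em": 1.0}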
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """Sort the list of integers `a` in place using pigeonhole sort."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join([str(x) for x in a]))


if __name__ == "__main__":
    main()
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
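

# A minimal inference sketch (not part of the original test file): the same
# checkpoint-driven flow the integration test exercises, written as plain usage.
# Assumes network access to download "google/vit-base-patch16-224" and that the
# fixture image path above exists.
if __name__ == "__main__":
    image_processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
    model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
    inputs = image_processor(images=prepare_img(), return_tensors="tf")
    logits = model(**inputs).logits
    print(model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])])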
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])
@slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
        expected_encoding = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=_lowercase , )
| 561 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 |
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
if edge <= 0 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
raise ValueError("Length must be a positive." )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
if edge <= 0 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
raise ValueError("Length must be a positive." )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
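    # Quick sanity check (illustrative, values rounded; not part of the original module):
    print(round(dodecahedron_surface_area(5), 4))  # 516.1432
    print(round(dodecahedron_volume(5), 4))        # 957.8899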
| 682 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 position_embedding_type="absolute", use_cache=True, classifier_dropout=None, pre_norm=False,
                 adapter_reduction_factor=2, adapter_layer_norm=False, adapter_reuse_layer_norm=True,
                 ln_before_adapter=True, languages=("en_XX",), default_language=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
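# Minimal usage sketch (not part of the module; attribute values follow the defaults above):
# config = XmodConfig()
# onnx_axes = XmodOnnxConfig(config).inputs  # OrderedDict of dynamic axes per input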
| 332 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
args = parser.parse_args()

device = 'cpu'
prompt = 'a lovely <dicoo> in red dress and hat, in the snowy and bright night, with many brightly lit buildings'
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'generator': generator}
if args.steps is not None:
    generate_kwargs['num_inference_steps'] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save('generated.png')
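# Illustrative timing wrapper (not part of the original script); assumes the
# objects defined above are in scope:
import time

start = time.perf_counter()
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    _ = pipe(prompt, **generate_kwargs).images[0]
print(f'bf16 inference took {time.perf_counter() - start:.1f}s')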
| 332 | 1 |
'''simple docstring'''
import argparse
import hashlib
import io
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def remove_ignore_keys_(state_dict):
    """Drop the container keys that have no direct HF equivalent."""
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def rename_keys(s_dict):
    """Rename OpenAI checkpoint keys to HF names using WHISPER_MAPPING."""
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    """Build a bias-free linear projection tied to the embedding weights."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
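# Illustrative check (not part of the conversion script): the returned layer
# shares storage with the embedding, so the output head stays weight-tied:
#   emb = nn.Embedding(10, 4)
#   proj = make_linear_from_emb(emb)
#   proj.weight.data_ptr() == emb.weight.data_ptr()  # True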
def _download(url: str, root: str) -> bytes:
    """Download `url` into `root`, verifying the SHA256 embedded in the URL."""
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.")
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # Assumption: cache downloaded checkpoints under ~/.cache/whisper; the
        # helper above requires an explicit root and returns raw bytes.
        root = os.path.join(os.path.expanduser("~"), ".cache", "whisper")
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path], root)), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoints')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
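# Example CLI usage (illustrative; the script filename and output path are placeholders):
#   python convert_openai_to_hf.py --checkpoint_path tiny.en --pytorch_dump_folder_path ./whisper-tiny.en-hf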
| 421 |
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
    load_gpt2,
    recopy_gpt2,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True,
                     data_file="data/tokenized_stories_train_wikitext103.jbl",
                     igf_data_file="igf_context_pairs.jbl"):
    """Collect (context, information-gain) pairs for training the secondary learner."""
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim)
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(secondary_learner_train_data, secondary_learner_max_epochs=15,
                               secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt"):
    """Train the secondary learner (the IG predictor) on the collected pairs."""
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(secondary_learner, secondary_learner_train_data,
                                                max_epochs=secondary_learner_max_epochs,
                                                batch_size=secondary_learner_batch_size, eval_freq=100,
                                                igf_model_path=igf_model_path)

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0,
             recopy_model=recopy_gpt2, secondary_learner=None, eval_interval=10,
             finetuned_model_name="gpt2_finetuned.pt"):
    """Fine-tune GPT-2, keeping only the contexts the secondary learner rates informative."""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0))[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
                # Break out of the loop after 60 batches
                if max_steps > 0 and global_step > 60:
                    break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data dir. Should contain data files for WikiText.")
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="Path to pretrained model or model identifier from huggingface.co/models")
    parser.add_argument("--data_file", type=str, default=None,
                        help=("A jbl file containing tokenized data which can be split as objective dataset, "
                              "train_dataset and test_dataset."))
    parser.add_argument("--igf_data_file", type=str, default=None,
                        help="A jbl file containing the context and information gain pairs to train secondary learner.")
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the final fine-tuned model is stored.")
    parser.add_argument("--tokenizer_name", default=None, type=str,
                        help="Pretrained tokenizer name or path if not the same as model_name")
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument("--context_len", default=32, type=int,
                        help=("The maximum total input sequence length after tokenization. Sequences longer "
                              "than this will be truncated, sequences shorter will be padded."))
    parser.add_argument("--size_objective_set", default=100, type=int,
                        help="number of articles that are long enough to be used as our objective set")
    parser.add_argument("--eval_freq", default=100, type=int,
                        help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument("--secondary_learner_batch_size", default=128, type=int,
                        help="batch size of training data for secondary learner")
    parser.add_argument("--batch_size", default=16, type=int,
                        help="batch size of training data of language model(gpt2) ")
    parser.add_argument("--eval_interval", default=10, type=int,
                        help=("decay the selectivity of our secondary learner filter from "
                              "1 standard deviation above average to 1 below average after 10 batches"))
    parser.add_argument("--number", default=100, type=int,
                        help="The number of examples split to be used as objective_set/test_data")
    parser.add_argument("--min_len", default=1026, type=int,
                        help="The minimum length of the article to be used as objective set")
    parser.add_argument("--secondary_learner_max_epochs", default=15, type=int,
                        help="number of epochs to train secondary learner")
    parser.add_argument("--trim", default=True, type=bool,
                        help="truncate the example if it exceeds context length")
    parser.add_argument("--threshold", default=1.0, type=float,
                        help=("The threshold value used by secondary learner to filter the train_data and allow only"
                              " informative data as input to the model"))
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument("--recopy_model", default=recopy_gpt2, type=str,
                        help="Reset the model to the original pretrained GPT-2 weights after each iteration")

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True,
                     data_file="data/tokenized_stories_train_wikitext103.jbl",
                     igf_data_file="igf_context_pairs.jbl")

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(secondary_learner_train_data, secondary_learner_max_epochs=15,
                                                   secondary_learner_batch_size=128, eval_freq=100,
                                                   igf_model_path="igf_model.pt")

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(context_len=32,
                                                    file="data/tokenized_stories_train_wikitext103.jbl",
                                                    number=100, min_len=1026, trim=True)

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0,
             recopy_model=recopy_gpt2, secondary_learner=secondary_learner, eval_interval=10,
             finetuned_model_name="gpt2_finetuned.pt")
if __name__ == "__main__":
main()
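# Illustrative restatement (not part of the script) of the selectivity decay
# hard-coded in `finetune` above: the IG threshold starts at +1 (about one
# standard deviation above the average predicted IG) and drops to -1 after
# 10 batches.
def igf_threshold(global_step: int) -> float:
    return 1.0 if global_step < 10 else -1.0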
| 421 | 1 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
# Assumption: this is the TF_CPP_MIN_LOG_LEVEL setting (the only use of the
# `os` import in this script); "3" silences TensorFlow's C++ logging.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 653 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latent_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True)
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
| 653 | 1 |
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    """Copy the original generator weights into the HF SpeechT5HifiGan model."""
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
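# Hypothetical invocation from Python (all paths are placeholders, not from the
# original script):
# convert_hifigan_checkpoint("hifigan_generator.ckpt", "stats.npy", "./speecht5-hifigan-hf")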
| 453 |
def z_function(input_str: str) -> list[int]:
    """Compute the Z-array: z[i] is the length of the longest prefix of
    `input_str` that also starts at position i."""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us a larger right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check whether the match at position i can be extended by one character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of `pattern` in `input_str` via the Z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
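    # quick demo (values verified by hand):
    print(z_function("abracadabra"))            # [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
    print(find_pattern("abra", "abracadabra"))  # 2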
| 74 | 0 |
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Return num! computed recursively, memoized by lru_cache."""
    if num < 0:
        raise ValueError("Number should not be negative.")

    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
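    # e.g. 5! = 5 * 4 * 3 * 2 * 1:
    print(factorial(5))  # 120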
| 711 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True
def _get_default_logging_level():
    """Use the TRANSFORMERS_VERBOSITY env var as the default level when it is
    set to a valid choice; otherwise fall back to WARNING."""
    env_level_str = os.getenv('TRANSFORMERS_VERBOSITY', None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }")
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split('.')[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    """Like logger.warning, but silenced when TRANSFORMERS_NO_ADVISORY_WARNINGS is set."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Emit a warning only once per unique call (memoized by lru_cache)."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
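# Typical use of this module inside the library (illustrative):
# logger = get_logger(__name__)
# set_verbosity_info()
# logger.info("ready")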
| 647 | 0 |
def optimal_merge_pattern(files: list) -> float:
    """Merge the files two at a time, always picking the two smallest,
    and return the total (optimal) merge cost."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider the two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
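    # e.g. merging [2, 3, 4]: 2+3 = 5, then 5+4 = 9, total cost 5 + 9 = 14
    print(optimal_merge_pattern([2, 3, 4]))  # 14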
| 37 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class TimesformerConfig(PretrainedConfig):
    """Configuration class for the TimeSformer video model."""

    model_type = 'timesformer'

    def __init__(self, image_size=224, patch_size=16, num_channels=3, num_frames=8, hidden_size=768,
                 num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu',
                 hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
                 layer_norm_eps=1e-6, qkv_bias=True, attention_type='divided_space_time', drop_path_rate=0,
                 **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
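# Minimal usage sketch (not part of the module; values follow the defaults above):
# config = TimesformerConfig(num_frames=16)
# assert config.attention_type == 'divided_space_time'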
| 239 | 0 |
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F'''{solution() = }''')
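    # sanity check on a small bound: the even Fibonacci terms <= 100 are 2, 8, 34
    print(solution(100))  # 44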
| 198 |
def solution(n: int = 1_000) -> int:
    """Return the index of the first Fibonacci number to contain n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
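    # e.g. the first Fibonacci number with 3 digits is F(12) = 144:
    assert solution(3) == 12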
| 198 | 1 |
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the message decrypted with every key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f'Decryption using Key #{key}: {translated}')


def main() -> None:
    message = input('Encrypted message: ')
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
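    # e.g. for the input "KHOOR" (that is, "HELLO" shifted by 3), the line for
    # key 3 reads:
    #   Decryption using Key #3: HELLO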
| 97 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_distributed_data_loop.py'])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_ops.py'])
@require_multi_gpu
def _UpperCAmelCase ( self : Any ):
"""simple docstring"""
print(f"""Found {torch.cuda.device_count()} devices.""" )
A : List[Any] = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
@require_multi_gpu
def _UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
print(f"""Found {torch.cuda.device_count()} devices.""" )
A : str = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(f"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
@require_multi_gpu
def _UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
A : Any = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
@require_multi_gpu
def _UpperCAmelCase ( self : int ):
"""simple docstring"""
print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
A : Optional[int] = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
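

# How the third test above works (sketch): it relaunches this very file with torchrun,
# so the __main__ block below runs once per process and exercises
# Accelerator.pad_across_processes on ranks holding different-sized tensors.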
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
raise ValueError(error_msg) | 256 | 0 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    """Fill the single <mask> token in `masked_input`; return the `topk` best completions."""
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
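# Output sketch: fill_mask returns a list of (filled_sentence, probability, token)
# tuples ranked by probability, so with topk=3 the print above shows the three most
# likely completions of the masked sentence.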
| 721 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
A : Tuple = logging.get_logger(__name__)
A : Tuple = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
A : int = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
A : Union[str, Any] = {
"allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Map every byte to a printable unicode character so BPE can operate on text."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
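

# Illustration: bytes that are not printable latin-1 characters get remapped upward,
# e.g. the space byte 32 becomes chr(256 + 32) == "Ġ", which is why GPT-2-style
# vocabularies show "Ġ" wherever a token starts with a leading space.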
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
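

# Example: get_pairs(("h", "e", "l", "l", "o")) ==
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}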
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = ["input_ids", "attention_mask"]
def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any]="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : Union[str, Any]="<mask>" , __lowerCamelCase : Tuple=False , **__lowerCamelCase : Optional[Any] , ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
lowerCamelCase__ : Optional[int] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
lowerCamelCase__ : str = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
lowerCamelCase__ : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
lowerCamelCase__ : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
lowerCamelCase__ : Union[str, Any] = json.load(__lowerCamelCase )
lowerCamelCase__ : List[str] = {v: k for k, v in self.encoder.items()}
lowerCamelCase__ : Union[str, Any] = errors # how to handle errors in decoding
lowerCamelCase__ : List[Any] = bytes_to_unicode()
lowerCamelCase__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
lowerCamelCase__ : List[Any] = merges_handle.read().split("\n" )[1:-1]
lowerCamelCase__ : str = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase__ : Optional[Any] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
lowerCamelCase__ : List[Any] = {}
lowerCamelCase__ : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase__ : List[str] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.encoder )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Dict ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCamelCase__ : Union[str, Any] = tuple(__lowerCamelCase )
lowerCamelCase__ : Tuple = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
lowerCamelCase__ : str = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = bigram
lowerCamelCase__ : int = []
lowerCamelCase__ : int = 0
while i < len(__lowerCamelCase ):
try:
lowerCamelCase__ : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase__ : List[str] = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase__ : Dict = tuple(__lowerCamelCase )
lowerCamelCase__ : str = new_word
if len(__lowerCamelCase ) == 1:
break
else:
lowerCamelCase__ : List[str] = get_pairs(__lowerCamelCase )
lowerCamelCase__ : Optional[int] = " ".join(__lowerCamelCase )
lowerCamelCase__ : Dict = word
return word
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = []
for token in re.findall(self.pat , __lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
return bpe_tokens
def lowerCAmelCase ( self : Any , __lowerCamelCase : int ):
'''simple docstring'''
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
return self.decoder.get(__lowerCamelCase )
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = "".join(__lowerCamelCase )
lowerCamelCase__ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCamelCase__ : List[Any] = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase__ : Union[str, Any] = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
lowerCamelCase__ : Tuple = 0
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
lowerCamelCase__ : List[Any] = token_index
writer.write(" ".join(__lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def lowerCAmelCase ( self : int , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ : List[str] = [self.cls_token_id]
lowerCamelCase__ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowerCamelCase__ : Any = [self.sep_token_id]
lowerCamelCase__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Dict=False , **__lowerCamelCase : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
lowerCamelCase__ : Dict = " " + text
return (text, kwargs)
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ):
'''simple docstring'''
lowerCamelCase__ : str = super()._pad(
encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
lowerCamelCase__ : str = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCamelCase__ : Optional[int] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowerCamelCase__ : Union[str, Any] = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase )
if needs_to_be_padded:
lowerCamelCase__ : Dict = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCamelCase__ : Optional[int] = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
lowerCamelCase__ : Union[str, Any] = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
| 5 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowercase__ =logging.get_logger(__name__)
lowercase__ ={
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class a_ ( __lowercase ):
lowerCamelCase__ : Any = """gptj"""
lowerCamelCase__ : Optional[Any] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , UpperCAmelCase=5_04_00 , UpperCAmelCase=20_48 , UpperCAmelCase=40_96 , UpperCAmelCase=28 , UpperCAmelCase=16 , UpperCAmelCase=64 , UpperCAmelCase=None , UpperCAmelCase="gelu_new" , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=1e-5 , UpperCAmelCase=0.02 , UpperCAmelCase=True , UpperCAmelCase=5_02_56 , UpperCAmelCase=5_02_56 , UpperCAmelCase=False , **UpperCAmelCase , ):
a_ = vocab_size
a_ = n_positions
a_ = n_embd
a_ = n_layer
a_ = n_head
a_ = n_inner
a_ = rotary_dim
a_ = activation_function
a_ = resid_pdrop
a_ = embd_pdrop
a_ = attn_pdrop
a_ = layer_norm_epsilon
a_ = initializer_range
a_ = use_cache
a_ = bos_token_id
a_ = eos_token_id
super().__init__(
bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , tie_word_embeddings=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class a_ ( __lowercase ):
def __init__( self , UpperCAmelCase , UpperCAmelCase = "default" , UpperCAmelCase = None , UpperCAmelCase = False , ):
super().__init__(SCREAMING_SNAKE_CASE__ , task=SCREAMING_SNAKE_CASE__ , patching_specs=SCREAMING_SNAKE_CASE__ , use_past=SCREAMING_SNAKE_CASE__ )
if not getattr(self._config , """pad_token_id""" , SCREAMING_SNAKE_CASE__ ):
# TODO: how to do that better?
a_ = 0
@property
def lowerCAmelCase__ ( self ):
a_ = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE__ , direction="""inputs""" )
a_ = {0: """batch""", 1: """past_sequence + sequence"""}
else:
a_ = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowerCAmelCase__ ( self ):
return self._config.n_layer
@property
def lowerCAmelCase__ ( self ):
return self._config.n_head
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = -1 , UpperCAmelCase = -1 , UpperCAmelCase = False , UpperCAmelCase = None , ):
a_ = super(SCREAMING_SNAKE_CASE__ , self ).generate_dummy_inputs(
SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , seq_length=SCREAMING_SNAKE_CASE__ , is_pair=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ )
        # We need to order the inputs in the way they appear in the forward()
a_ = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
a_ , a_ = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
a_ = seqlen + 2
a_ = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
a_ = [
(torch.zeros(SCREAMING_SNAKE_CASE__ ), torch.zeros(SCREAMING_SNAKE_CASE__ )) for _ in range(self.num_layers )
]
a_ = common_inputs["""attention_mask"""]
if self.use_past:
a_ = ordered_inputs["""attention_mask"""].dtype
a_ = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )] , dim=1 )
return ordered_inputs
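
    # Shape sketch for the dummy cache built above: each of the n_layer entries is a
    # (key, value) pair of zero tensors shaped (batch, n_head, seqlen + 2, n_embd // n_head),
    # and the attention mask is extended by seqlen + 2 ones so it also covers the past.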
@property
def lowerCAmelCase__ ( self ):
return 13
| 263 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase__ ( __lowercase , __lowercase ):
@register_to_config
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : bool , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None ) -> List[str]:
super().__init__()
__lowerCamelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
__lowerCamelCase = torch.zeros(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
__lowerCamelCase = None
__lowerCamelCase = torch.nn.Parameter(SCREAMING_SNAKE_CASE__ )
class lowerCAmelCase__ ( __lowercase ):
a__ : VQModel
a__ : CLIPTextModel
a__ : CLIPTokenizer
a__ : TransformeraDModel
a__ : LearnedClassifierFreeSamplingEmbeddings
a__ : VQDiffusionScheduler
def __init__( self : int , SCREAMING_SNAKE_CASE__ : VQModel , SCREAMING_SNAKE_CASE__ : CLIPTextModel , SCREAMING_SNAKE_CASE__ : CLIPTokenizer , SCREAMING_SNAKE_CASE__ : TransformeraDModel , SCREAMING_SNAKE_CASE__ : VQDiffusionScheduler , SCREAMING_SNAKE_CASE__ : LearnedClassifierFreeSamplingEmbeddings , ) -> Any:
super().__init__()
self.register_modules(
vqvae=SCREAMING_SNAKE_CASE__ , transformer=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , learned_classifier_free_sampling_embeddings=SCREAMING_SNAKE_CASE__ , )
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any ) -> Tuple:
__lowerCamelCase = len(SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else 1
# get prompt text embeddings
__lowerCamelCase = self.tokenizer(
SCREAMING_SNAKE_CASE__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
__lowerCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__lowerCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__lowerCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
__lowerCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__lowerCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=SCREAMING_SNAKE_CASE__ )
# duplicate text embeddings for each generation per prompt
__lowerCamelCase = prompt_embeds.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__lowerCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
__lowerCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(SCREAMING_SNAKE_CASE__ , 1 , 1 )
else:
__lowerCamelCase = [''''''] * batch_size
__lowerCamelCase = text_input_ids.shape[-1]
__lowerCamelCase = self.tokenizer(
SCREAMING_SNAKE_CASE__ , padding='''max_length''' , max_length=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , )
__lowerCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__lowerCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=SCREAMING_SNAKE_CASE__ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCamelCase = negative_prompt_embeds.shape[1]
__lowerCamelCase = negative_prompt_embeds.repeat(1 , SCREAMING_SNAKE_CASE__ , 1 )
__lowerCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
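
    # Batch-layout note for the embeddings returned above: under classifier-free
    # guidance the tensor stacks [negative_prompt_embeds, prompt_embeds] along dim 0,
    # so a single transformer forward in __call__ yields both halves of the guidance mix.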
@torch.no_grad()
def __call__( self : int , SCREAMING_SNAKE_CASE__ : Union[str, List[str]] , SCREAMING_SNAKE_CASE__ : int = 1_00 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , SCREAMING_SNAKE_CASE__ : int = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = 1
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = len(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE__ )}''' )
__lowerCamelCase = batch_size * num_images_per_prompt
__lowerCamelCase = guidance_scale > 1.0
__lowerCamelCase = self._encode_prompt(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(SCREAMING_SNAKE_CASE__ )}.''' )
# get the initial completely masked latents unless the user supplied it
__lowerCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__lowerCamelCase = self.transformer.num_vector_embeds - 1
__lowerCamelCase = torch.full(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                    '''Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'''
f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
__lowerCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ , device=self.device )
__lowerCamelCase = self.scheduler.timesteps.to(self.device )
__lowerCamelCase = latents
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ):
# expand the sample if we are doing classifier free guidance
__lowerCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__lowerCamelCase = self.transformer(SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ ).sample
if do_classifier_free_guidance:
__lowerCamelCase , __lowerCamelCase = model_output.chunk(2 )
__lowerCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(SCREAMING_SNAKE_CASE__ , dim=1 , keepdim=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.truncate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# remove `log(0)`'s (`-inf`s)
__lowerCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , sample=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.vqvae.config.vq_embed_dim
__lowerCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__lowerCamelCase = self.vqvae.quantize.get_codebook_entry(SCREAMING_SNAKE_CASE__ , shape=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.vqvae.decode(SCREAMING_SNAKE_CASE__ , force_not_quantize=SCREAMING_SNAKE_CASE__ ).sample
__lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE__ )
def __A ( self : int , SCREAMING_SNAKE_CASE__ : torch.FloatTensor , SCREAMING_SNAKE_CASE__ : float ) -> torch.FloatTensor:
__lowerCamelCase , __lowerCamelCase = torch.sort(SCREAMING_SNAKE_CASE__ , 1 , descending=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = torch.exp(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
__lowerCamelCase = torch.full_like(keep_mask[:, 0:1, :] , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
__lowerCamelCase = keep_mask[:, :-1, :]
__lowerCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
__lowerCamelCase = log_p_x_0.clone()
__lowerCamelCase = -torch.inf # -inf = log(0)
return rv
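
    # Worked example for the truncation above (hypothetical numbers): with sorted
    # per-pixel probabilities [0.5, 0.3, 0.15, 0.05] and truncation_rate=0.8, the
    # shifted cumulative-sum mask keeps 0.5 and 0.3 and sets the rest to -inf (log 0);
    # the prepended all-true row guarantees the largest probability always survives.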
| 298 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase = {
"""configuration_time_series_transformer""": [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TimeSeriesTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimeSeriesTransformerForPrediction""",
"""TimeSeriesTransformerModel""",
"""TimeSeriesTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 719 |
'''simple docstring'''
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    print(f'Vertex\tShortest Distance from vertex {src}')
    for i, d in enumerate(distance):
        print(f'{i}\t\t{d}')


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ('src', 'dst', 'weight'))
        if distance[u] != float('inf') and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float('inf')] * vertex_count
    distance[src] = 0.0
    # Relax every edge vertex_count - 1 times.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ('src', 'dst', 'weight'))
            if distance[u] != float('inf') and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    # Any further improvement after vertex_count - 1 rounds means a negative cycle.
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception('Negative cycle found')
    return distance
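

# Worked example (hypothetical graph): with edges 0->1 (w=2), 1->2 (w=-1), 0->2 (w=4),
# bellman_ford(graph, vertex_count=3, edge_count=3, src=0) returns [0.0, 2.0, 1.0]:
# the relaxed path 0->1->2 beats the direct 0->2 edge.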
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}
    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, source)
| 427 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('test')
    else:
        parser = argparse.ArgumentParser('Accelerate test command')
    parser.add_argument(
        '--config_file' , default=None , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ['test_utils', 'scripts', 'test_script.py'])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"""--config_file={args.config_file} {script_name}"""
    cmd = ['accelerate-launch'] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy())
    if result.returncode == 0:
        print('Test is a success! You are ready for your distributed training!')


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
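# CLI sketch: this module backs `accelerate test`, which launches the bundled
# test_script.py via `accelerate-launch`; pass --config_file to point it at a
# specific saved configuration instead of the default cache location.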
| 103 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowercase : Any = logging.get_logger(__name__)
lowercase : Union[str, Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
lowercase : Tuple = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"] )
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"] )
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model , hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
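

# Usage sketch (hypothetical paths; the flags match the argparse definitions below):
# python convert_wavlm_original_checkpoint.py --checkpoint_path WavLM-Base.pt \
#     --pytorch_dump_folder_path ./wavlm-base --config_path ./config.json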
if __name__ == "__main__":
lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
lowercase : Optional[int] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 327 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class a__( lowerCamelCase__ ):
lowercase__ = """SpeechT5FeatureExtractor"""
lowercase__ = """SpeechT5Tokenizer"""
def __init__( self : Dict , __snake_case : int , __snake_case : str ):
super().__init__(__snake_case , __snake_case )
def __call__( self : List[str] , *__snake_case : Tuple , **__snake_case : Optional[Any] ):
a : List[str] = kwargs.pop('audio' , __snake_case )
a : str = kwargs.pop('text' , __snake_case )
a : Union[str, Any] = kwargs.pop('text_target' , __snake_case )
a : List[Any] = kwargs.pop('audio_target' , __snake_case )
a : Optional[Any] = kwargs.pop('sampling_rate' , __snake_case )
if audio is not None and text is not None:
raise ValueError(
'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?' )
if audio_target is not None and text_target is not None:
raise ValueError(
'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.' )
if audio is not None:
a : Optional[int] = self.feature_extractor(__snake_case , *__snake_case , sampling_rate=__snake_case , **__snake_case )
elif text is not None:
a : List[str] = self.tokenizer(__snake_case , **__snake_case )
else:
a : str = None
if audio_target is not None:
a : List[str] = self.feature_extractor(audio_target=__snake_case , *__snake_case , sampling_rate=__snake_case , **__snake_case )
a : Optional[Any] = targets['input_values']
elif text_target is not None:
a : Tuple = self.tokenizer(__snake_case , **__snake_case )
a : Dict = targets['input_ids']
else:
a : str = None
if inputs is None:
return targets
if targets is not None:
a : List[Any] = labels
a : Any = targets.get('attention_mask' )
if decoder_attention_mask is not None:
a : List[Any] = decoder_attention_mask
return inputs
def lowercase_ ( self : List[str] , *__snake_case : Dict , **__snake_case : List[str] ):
a : Optional[Any] = kwargs.pop('input_values' , __snake_case )
a : Any = kwargs.pop('input_ids' , __snake_case )
a : Any = kwargs.pop('labels' , __snake_case )
if input_values is not None and input_ids is not None:
raise ValueError('Cannot process both `input_values` and `input_ids` inputs.' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.' )
if input_values is not None:
a : Dict = self.feature_extractor.pad(__snake_case , *__snake_case , **__snake_case )
elif input_ids is not None:
a : List[str] = self.tokenizer.pad(__snake_case , **__snake_case )
else:
a : Any = None
if labels is not None:
if "input_ids" in labels or (isinstance(__snake_case , __snake_case ) and "input_ids" in labels[0]):
a : Union[str, Any] = self.tokenizer.pad(__snake_case , **__snake_case )
a : Dict = targets['input_ids']
else:
a : List[Any] = self.feature_extractor.feature_size
a : Union[str, Any] = self.feature_extractor.num_mel_bins
a : Optional[Any] = self.feature_extractor.pad(__snake_case , *__snake_case , **__snake_case )
a : Dict = feature_size_hack
a : List[str] = targets['input_values']
else:
a : List[str] = None
if inputs is None:
return targets
if targets is not None:
a : str = labels
a : str = targets.get('attention_mask' )
if decoder_attention_mask is not None:
a : Tuple = decoder_attention_mask
return inputs
def lowercase_ ( self : Any , *__snake_case : List[str] , **__snake_case : Dict ):
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def lowercase_ ( self : Union[str, Any] , *__snake_case : List[Any] , **__snake_case : Dict ):
return self.tokenizer.decode(*__snake_case , **__snake_case ) | 703 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data):
    return (data["data"], data["target"])


def xgboost(features, target, test_features):
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
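

# Optional robustness sketch (assumption: scikit-learn's cross_val_score is available):
# a k-fold error estimate is less noisy than the single train/test split used in main().
# from sklearn.model_selection import cross_val_score
# scores = cross_val_score(
#     XGBRegressor(verbosity=0, random_state=42), data, target,
#     scoring="neg_mean_absolute_error", cv=5,
# )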
def main():
    # Load the California housing dataset and evaluate an XGBoost regressor on it.
    california = fetch_california_housing()
    data, target = data_handling(california )
    x_train, x_test, y_train, y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(f"""Mean Absolute Error : {mean_absolute_error(y_test , predictions )}""" )
    print(f"""Mean Square Error : {mean_squared_error(y_test , predictions )}""" )


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
main() | 195 | 0 |
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any=2 , SCREAMING_SNAKE_CASE_ : Dict=56 , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : List[Any]=99 , SCREAMING_SNAKE_CASE_ : Any=32 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : Any=2 , SCREAMING_SNAKE_CASE_ : Any=7 , SCREAMING_SNAKE_CASE_ : Union[str, Any]="gelu_new" , SCREAMING_SNAKE_CASE_ : Any=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : str=512 , SCREAMING_SNAKE_CASE_ : Any=16 , SCREAMING_SNAKE_CASE_ : Optional[int]=2 , SCREAMING_SNAKE_CASE_ : Tuple=0.0_2 , SCREAMING_SNAKE_CASE_ : int=4 , SCREAMING_SNAKE_CASE_ : List[str]="block_sparse" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , SCREAMING_SNAKE_CASE_ : Optional[int]=False , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : List[str]=3 , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_attention_mask
lowerCamelCase__ = use_token_type_ids
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = num_choices
lowerCamelCase__ = rescale_embeddings
lowerCamelCase__ = attention_type
lowerCamelCase__ = use_bias
lowerCamelCase__ = block_size
lowerCamelCase__ = num_random_blocks
def __UpperCAmelCase ( self : List[str] ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_attention_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
if self.use_token_type_ids:
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self : Optional[int] ):
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
snake_case = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
snake_case = False
snake_case = False
def __UpperCAmelCase ( self : Dict ):
lowerCamelCase__ = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCAmelCase ( self : Optional[int] ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCAmelCase ( self : Dict ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCAmelCase ( self : List[str] ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCAmelCase ( self : List[str] ):
super().test_hidden_states_output()
@slow
def __UpperCAmelCase ( self : str ):
for model_class_name in self.all_model_classes:
lowerCamelCase__ = model_class_name.from_pretrained("""google/bigbird-roberta-base""" )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( self : Dict ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCAmelCase ( self : Optional[int] ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
@jax.jit
def model_jitted(SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , **SCREAMING_SNAKE_CASE_ : Dict ):
return model(input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
with self.subTest("""JIT Enabled""" ):
lowerCamelCase__ = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
lowerCamelCase__ = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertEqual(jitted_output.shape , output.shape )
def __UpperCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int=1e-5 , SCREAMING_SNAKE_CASE_ : Optional[Any]="outputs" , SCREAMING_SNAKE_CASE_ : Optional[int]=None ):
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was done to return `attention_probs` (yet to be verified).
if name.startswith("""outputs.attentions""" ):
return
else:
super().check_pt_flax_outputs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 129 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ):
snake_case = ["image_processor", "tokenizer"]
snake_case = "FlavaImageProcessor"
snake_case = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int]=None , SCREAMING_SNAKE_CASE_ : Any=None , **SCREAMING_SNAKE_CASE_ : List[Any] ):
lowerCamelCase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , SCREAMING_SNAKE_CASE_ , )
lowerCamelCase__ = kwargs.pop("""feature_extractor""" )
lowerCamelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = self.image_processor
def __call__( self : Any , SCREAMING_SNAKE_CASE_ : Optional[ImageInput] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Union[bool, str, PaddingStrategy] = False , SCREAMING_SNAKE_CASE_ : Union[bool, str, TruncationStrategy] = False , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , **SCREAMING_SNAKE_CASE_ : Union[str, Any] , ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
lowerCamelCase__ = self.tokenizer(
text=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_overflowing_tokens=SCREAMING_SNAKE_CASE_ , return_special_tokens_mask=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , return_length=SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
if images is not None:
lowerCamelCase__ = self.image_processor(
SCREAMING_SNAKE_CASE_ , return_image_mask=SCREAMING_SNAKE_CASE_ , return_codebook_pixels=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
if text is not None and images is not None:
encoding.update(SCREAMING_SNAKE_CASE_ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE_ ) , tensor_type=SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( self : List[Any] , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : str ):
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( self : Any , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : str ):
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def __UpperCAmelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.tokenizer.model_input_names
lowerCamelCase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __UpperCAmelCase ( self : Any ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , SCREAMING_SNAKE_CASE_ , )
return self.image_processor_class
@property
def __UpperCAmelCase ( self : Union[str, Any] ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , SCREAMING_SNAKE_CASE_ , )
return self.image_processor
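# Hedged usage sketch for the processor class above; the checkpoint id and
# image path are illustrative placeholders, and weights are downloaded on
# first run.
from PIL import Image
from transformers import FlavaProcessor

processor = FlavaProcessor.from_pretrained("facebook/flava-full")
image = Image.open("cat.png")  # any local RGB image
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
print(sorted(inputs.keys()))  # e.g. attention_mask, input_ids, pixel_values, ...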
| 129 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
UpperCAmelCase_ = False
class __lowercase ( unittest.TestCase ):
def UpperCamelCase__ ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase__ ( self ) -> Tuple:
return 12
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
return 12
@property
def UpperCamelCase__ ( self ) -> List[str]:
return 32
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
torch.manual_seed(0 )
__a = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
__a = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def UpperCamelCase__ ( self ) -> Tuple:
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(UpperCamelCase )
@property
def UpperCamelCase__ ( self ) -> Tuple:
torch.manual_seed(0 )
__a = 12
__a = 12
__a = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
__a = TransformeraDModel(**UpperCamelCase )
return model
def UpperCamelCase__ ( self ) -> Optional[Any]:
__a = 'cpu'
__a = self.dummy_vqvae
__a = self.dummy_text_encoder
__a = self.dummy_tokenizer
__a = self.dummy_transformer
__a = VQDiffusionScheduler(self.num_embed )
__a = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCamelCase )
__a = VQDiffusionPipeline(
vqvae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , transformer=UpperCamelCase , scheduler=UpperCamelCase , learned_classifier_free_sampling_embeddings=UpperCamelCase , )
__a = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
__a = 'teddy bear playing in the pool'
__a = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
__a = pipe([prompt] , generator=UpperCamelCase , num_inference_steps=2 , output_type='np' )
__a = output.images
__a = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
__a = pipe(
[prompt] , generator=UpperCamelCase , output_type='np' , return_dict=UpperCamelCase , num_inference_steps=2 )[0]
__a = image[0, -3:, -3:, -1]
__a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__a = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase__ ( self ) -> int:
__a = 'cpu'
__a = self.dummy_vqvae
__a = self.dummy_text_encoder
__a = self.dummy_tokenizer
__a = self.dummy_transformer
__a = VQDiffusionScheduler(self.num_embed )
__a = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCamelCase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
__a = VQDiffusionPipeline(
vqvae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , transformer=UpperCamelCase , scheduler=UpperCamelCase , learned_classifier_free_sampling_embeddings=UpperCamelCase , )
__a = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
__a = 'teddy bear playing in the pool'
__a = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
__a = pipe([prompt] , generator=UpperCamelCase , num_inference_steps=2 , output_type='np' )
__a = output.images
__a = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
__a = pipe(
[prompt] , generator=UpperCamelCase , output_type='np' , return_dict=UpperCamelCase , num_inference_steps=2 )[0]
__a = image[0, -3:, -3:, -1]
__a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__a = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def UpperCamelCase__ ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
__a = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
__a = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
__a = pipeline.to(UpperCamelCase )
pipeline.set_progress_bar_config(disable=UpperCamelCase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
__a = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
__a = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=UpperCamelCase , output_type='np' , )
__a = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
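# The tests above repeatedly compare a 3x3 corner of the generated image with
# hard-coded values. A self-contained sketch of that check (tolerance and
# shapes are illustrative):
import numpy as np

def check_image_slice(image, expected_slice, atol=1e-2):
    # image is (batch, height, width, channels); inspect the last 3x3 patch
    # of the last channel, as the tests do
    image_slice = image[0, -3:, -3:, -1]
    return bool(np.abs(image_slice.flatten() - expected_slice).max() < atol)

dummy = np.zeros((1, 24, 24, 3), dtype=np.float32)
assert check_image_slice(dummy, np.zeros(9, dtype=np.float32))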
| 490 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card( model_card_dir: Path , src_lang: str , tgt_lang: str , model_name: str ):
    texts = {
        'en': 'Machine learning is great, isn\'t it?',
        'ru': 'Машинное обучение - это здорово, не так ли?',
        'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        'wmt16-en-de-dist-12-1': [28.3, 27.52],
        'wmt16-en-de-dist-6-1': [27.4, 27.11],
        'wmt16-en-de-12-1': [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
    model_card_dir.mkdir(parents=True , exist_ok=True )
    path = os.path.join(model_card_dir , 'README.md' )
    print(f"Generating {path}" )
    with open(path , 'w' , encoding='utf-8' ) as f:
        f.write(readme )

# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
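# The core of the script above, reduced to a generic helper: render a card
# body and write it under model_cards/<org>/<name>/README.md. The target
# directory below is only an example.
def write_card(root: Path, org: str, name: str, body: str) -> Path:
    card_dir = root / "model_cards" / org / name
    card_dir.mkdir(parents=True, exist_ok=True)
    card_path = card_dir / "README.md"
    card_path.write_text(body, encoding="utf-8")
    return card_path

print(write_card(Path("/tmp"), "allenai", "wmt16-en-de-12-1", "# FSMT demo card\n"))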
| 490 | 1 |
"""simple docstring"""
from __future__ import annotations
def resistor_parallel( resistors: list[float] ) -> float:
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f'''Resistor at index {index} has a negative or zero value!'''
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum
def resistor_series( resistors: list[float] ) -> float:
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f'''Resistor at index {index} has a negative value!'''
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
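# Quick sanity check for the two helpers above: two 4-ohm resistors give
# 2 ohms in parallel and 8 ohms in series (values are illustrative).
assert resistor_parallel([4.0, 4.0]) == 2.0
assert resistor_series([4.0, 4.0]) == 8.0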
| 695 |
import random
class Onepad:
    """simple docstring"""
    @staticmethod
    def encrypt( text ):
        '''simple docstring'''
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1 , 300 )
            c = (i + k) * k
            cipher.append(c )
            key.append(k )
        return cipher, key
    @staticmethod
    def decrypt( cipher , key ):
        '''simple docstring'''
        plain = []
        for i in range(len(key ) ):
            p = int((cipher[i] - (key[i]) ** 2) / key[i] )
            plain.append(chr(p ) )
        return "".join(plain )
if __name__ == "__main__":
    c , k = Onepad().encrypt('Hello')
    print(c, k)
    print(Onepad().decrypt(c, k))
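# The arithmetic behind the round trip above: for plaintext code point p and
# key k, c = (p + k) * k, so (c - k**2) / k recovers p exactly (illustrative
# values below).
p0, k0 = ord("A"), 57
c0 = (p0 + k0) * k0
assert (c0 - k0**2) // k0 == p0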
| 398 | 0 |
import logging
from transformers import PretrainedConfig
lowerCAmelCase : Tuple =logging.getLogger(__name__)
lowerCAmelCase : Tuple ={
'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class _a ( PretrainedConfig ):
_UpperCamelCase: Union[str, Any] = "bertabs"
def __init__( self , lowercase_=30522 , lowercase_=512 , lowercase_=6 , lowercase_=512 , lowercase_=8 , lowercase_=512 , lowercase_=0.2 , lowercase_=6 , lowercase_=768 , lowercase_=8 , lowercase_=2048 , lowercase_=0.2 , **lowercase_ , ) -> Optional[Any]:
super().__init__(**lowercase_ )
lowerCAmelCase : Union[str, Any] = vocab_size
lowerCAmelCase : List[Any] = max_pos
lowerCAmelCase : Optional[Any] = enc_layers
lowerCAmelCase : int = enc_hidden_size
lowerCAmelCase : Union[str, Any] = enc_heads
lowerCAmelCase : Any = enc_ff_size
lowerCAmelCase : List[Any] = enc_dropout
lowerCAmelCase : Any = dec_layers
lowerCAmelCase : Any = dec_hidden_size
lowerCAmelCase : int = dec_heads
lowerCAmelCase : int = dec_ff_size
lowerCAmelCase : int = dec_dropout
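# A minimal, self-contained sketch of the PretrainedConfig pattern the class
# above follows; the class and field names here are illustrative, not from
# this file.
from transformers import PretrainedConfig

class TinySummarizerConfig(PretrainedConfig):
    model_type = "tiny-summarizer"

    def __init__(self, vocab_size=30522, enc_hidden_size=512, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.enc_hidden_size = enc_hidden_size

cfg = TinySummarizerConfig(enc_hidden_size=256)
assert cfg.enc_hidden_size == 256
assert cfg.to_dict()["model_type"] == "tiny-summarizer"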
| 693 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] ={
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _a ( PretrainedConfig ):
_UpperCamelCase: List[str] = "detr"
_UpperCamelCase: Dict = ["past_key_values"]
_UpperCamelCase: Optional[int] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=100 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=256 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.0_2 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> Optional[int]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCAmelCase : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : List[Any] = backbone_config.get("""model_type""" )
lowerCAmelCase : int = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase : Optional[int] = config_class.from_dict(lowercase_ )
# set timm attributes to None
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = None, None, None
lowerCAmelCase : Any = use_timm_backbone
lowerCAmelCase : int = backbone_config
lowerCAmelCase : Optional[int] = num_channels
lowerCAmelCase : Optional[Any] = num_queries
lowerCAmelCase : List[str] = d_model
lowerCAmelCase : Optional[int] = encoder_ffn_dim
lowerCAmelCase : Dict = encoder_layers
lowerCAmelCase : str = encoder_attention_heads
lowerCAmelCase : List[Any] = decoder_ffn_dim
lowerCAmelCase : List[Any] = decoder_layers
lowerCAmelCase : Union[str, Any] = decoder_attention_heads
lowerCAmelCase : str = dropout
lowerCAmelCase : Dict = attention_dropout
lowerCAmelCase : Union[str, Any] = activation_dropout
lowerCAmelCase : str = activation_function
lowerCAmelCase : Optional[int] = init_std
lowerCAmelCase : Any = init_xavier_std
lowerCAmelCase : Dict = encoder_layerdrop
lowerCAmelCase : int = decoder_layerdrop
lowerCAmelCase : Tuple = encoder_layers
lowerCAmelCase : Optional[int] = auxiliary_loss
lowerCAmelCase : List[str] = position_embedding_type
lowerCAmelCase : Any = backbone
lowerCAmelCase : Union[str, Any] = use_pretrained_backbone
lowerCAmelCase : List[Any] = dilation
# Hungarian matcher
lowerCAmelCase : Tuple = class_cost
lowerCAmelCase : Union[str, Any] = bbox_cost
lowerCAmelCase : Optional[Any] = giou_cost
# Loss coefficients
lowerCAmelCase : List[Any] = mask_loss_coefficient
lowerCAmelCase : Optional[int] = dice_loss_coefficient
lowerCAmelCase : Tuple = bbox_loss_coefficient
lowerCAmelCase : Dict = giou_loss_coefficient
lowerCAmelCase : str = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def _snake_case ( self ) -> int:
return self.encoder_attention_heads
@property
def _snake_case ( self ) -> int:
return self.d_model
@classmethod
def _snake_case ( cls , lowercase_ , **lowercase_ ) -> Any:
return cls(backbone_config=lowercase_ , **lowercase_ )
def _snake_case ( self ) -> Dict[str, any]:
lowerCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase : List[str] = self.backbone_config.to_dict()
lowerCAmelCase : List[Any] = self.__class__.model_type
return output
class _a ( OnnxConfig ):
_UpperCamelCase: Any = version.parse("1.11" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1e-5
@property
def _snake_case ( self ) -> int:
return 12
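# Hedged usage sketch for the DETR config above: `attribute_map` aliases
# `hidden_size` to `d_model` and `num_attention_heads` to
# `encoder_attention_heads`, so either name reads the same value.
from transformers import DetrConfig

cfg = DetrConfig(num_queries=50, encoder_layers=2, decoder_layers=2)
assert cfg.hidden_size == cfg.d_model
assert cfg.num_attention_heads == cfg.encoder_attention_heads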
| 693 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase__ :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> List[Any]:
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Optional[int] = seq_length
_lowerCamelCase : Any = is_training
_lowerCamelCase : int = use_input_mask
_lowerCamelCase : Tuple = use_token_type_ids
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : Any = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : Any = num_hidden_layers
_lowerCamelCase : Dict = num_attention_heads
_lowerCamelCase : List[Any] = intermediate_size
_lowerCamelCase : str = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : List[str] = attention_probs_dropout_prob
_lowerCamelCase : Dict = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : Any = type_sequence_label_size
_lowerCamelCase : List[str] = initializer_range
_lowerCamelCase : Optional[Any] = num_labels
_lowerCamelCase : List[str] = num_choices
_lowerCamelCase : int = scope
def UpperCamelCase_ ( self) -> int:
_lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_lowerCamelCase : Union[str, Any] = None
if self.use_input_mask:
_lowerCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length])
_lowerCamelCase : int = None
if self.use_token_type_ids:
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_lowerCamelCase : Dict = None
_lowerCamelCase : Any = None
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_choices)
_lowerCamelCase : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self) -> Dict:
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[str]:
_lowerCamelCase : str = NystromformerModel(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[Any]:
_lowerCamelCase : Union[str, Any] = NystromformerForMaskedLM(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Any:
_lowerCamelCase : List[str] = NystromformerForQuestionAnswering(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_lowerCamelCase : Dict = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , start_positions=SCREAMING_SNAKE_CASE , end_positions=SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Optional[Any]:
_lowerCamelCase : Union[str, Any] = self.num_labels
_lowerCamelCase : int = NystromformerForSequenceClassification(SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_lowerCamelCase : str = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> int:
_lowerCamelCase : Tuple = self.num_labels
_lowerCamelCase : int = NystromformerForTokenClassification(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[str]:
_lowerCamelCase : int = self.num_choices
_lowerCamelCase : Any = NystromformerForMultipleChoice(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_lowerCamelCase : Optional[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_lowerCamelCase : List[str] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_lowerCamelCase : Dict = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_lowerCamelCase : str = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def UpperCamelCase_ ( self) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class lowercase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__UpperCAmelCase = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
'''feature-extraction''': NystromformerModel,
'''fill-mask''': NystromformerForMaskedLM,
'''question-answering''': NystromformerForQuestionAnswering,
'''text-classification''': NystromformerForSequenceClassification,
'''token-classification''': NystromformerForTokenClassification,
'''zero-shot''': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
def UpperCamelCase_ ( self) -> List[str]:
_lowerCamelCase : Union[str, Any] = NystromformerModelTester(self)
_lowerCamelCase : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37)
def UpperCamelCase_ ( self) -> str:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self) -> Any:
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Tuple:
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCamelCase : int = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> int:
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Union[str, Any]:
_lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Optional[int]:
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> List[str]:
_lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> str:
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE)
@slow
def UpperCamelCase_ ( self) -> Optional[int]:
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Dict = NystromformerModel.from_pretrained(SCREAMING_SNAKE_CASE)
self.assertIsNotNone(SCREAMING_SNAKE_CASE)
@require_torch
class lowercase__ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self) -> Optional[Any]:
_lowerCamelCase : Union[str, Any] = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""")
_lowerCamelCase : Union[str, Any] = torch.tensor([[0, 1, 2, 3, 4, 5]])
with torch.no_grad():
_lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE)[0]
_lowerCamelCase : Any = torch.Size((1, 6, 768))
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[str] = torch.tensor(
[[[-0.45_32, -0.09_36, 0.51_37], [-0.26_76, 0.06_28, 0.61_86], [-0.36_29, -0.17_26, 0.47_16]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4))
@slow
def UpperCamelCase_ ( self) -> Optional[Any]:
_lowerCamelCase : Optional[Any] = """the [MASK] of Belgium is Brussels"""
_lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""")
_lowerCamelCase : List[Any] = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""")
_lowerCamelCase : str = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="""pt""")
with torch.no_grad():
_lowerCamelCase : Tuple = model(encoding.input_ids).logits
_lowerCamelCase : str = token_logits[:, 2, :].argmax(-1)[0]
self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE) , """capital""")
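# The fill-mask check above as a standalone script; the checkpoint id is the
# one the test uses, and weights are downloaded on first run.
import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
mlm = AutoModelForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
enc = tok("the [MASK] of Belgium is Brussels", return_tensors="pt")
with torch.no_grad():
    logits = mlm(**enc).logits
mask_pos = (enc.input_ids == tok.mask_token_id).nonzero()[0, 1]
print(tok.decode(logits[0, mask_pos].argmax(-1)))  # expected: "capital"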
| 88 |
from typing import List
from .keymap import KEYMAP, get_character
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
"""simple docstring"""
def decorator(SCREAMING_SNAKE_CASE_ : List[Any] ):
_UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] )
handle += [key]
setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ )
return func
return decorator
def A__ ( *SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
"""simple docstring"""
def decorator(SCREAMING_SNAKE_CASE_ : Any ):
_UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] )
handle += keys
setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ )
return func
return decorator
class __UpperCamelCase ( type ):
def __new__( cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = super().__new__(cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if not hasattr(_UpperCamelCase , '''key_handler''' ):
setattr(_UpperCamelCase , '''key_handler''' , {} )
setattr(_UpperCamelCase , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
_UpperCAmelCase = getattr(_UpperCamelCase , '''handle_key''' , [] )
for key in handled_keys:
_UpperCAmelCase = value
return new_cls
@staticmethod
def UpperCamelCase( cls ):
_UpperCAmelCase = get_character()
if char != KEYMAP["undefined"]:
_UpperCAmelCase = ord(_UpperCamelCase )
_UpperCAmelCase = cls.key_handler.get(_UpperCamelCase )
if handler:
_UpperCAmelCase = char
return handler(cls )
else:
return None
def A__ ( cls : Union[str, Any] ) -> Any:
"""simple docstring"""
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 32 | 0 |
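# The keymap row above pairs a tagging decorator with a metaclass that
# collects tagged methods into a dispatch table. A self-contained sketch of
# that pattern (all names here are illustrative):
def on_key(key):
    def decorator(func):
        handled = getattr(func, "handle_key", [])
        handled.append(key)
        func.handle_key = handled
        return func
    return decorator

class KeyDispatch(type):
    def __new__(mcls, name, bases, attrs):
        cls = super().__new__(mcls, name, bases, attrs)
        cls.key_handler = {}
        for value in attrs.values():
            for key in getattr(value, "handle_key", []):
                cls.key_handler[key] = value
        return cls

class Menu(metaclass=KeyDispatch):
    @on_key("q")
    def quit(self):
        return "quit"

assert "q" in Menu.key_handler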
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
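# The same re-export-with-warning intent, using only the standard library;
# module and symbol names are placeholders, not the diffusers API.
import warnings
from math import gcd as _new_gcd  # pretend this symbol moved here

def gcd(*args, **kwargs):
    warnings.warn(
        "`old_module.gcd` is deprecated; import `gcd` from `math` instead.",
        FutureWarning,
        stacklevel=2,
    )
    return _new_gcd(*args, **kwargs)

assert gcd(12, 8) == 4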
| 592 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class _a ( TestCasePlus ):
"""simple docstring"""
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
_UpperCAmelCase =BertTokenizer.from_pretrained("bert-base-uncased" )
_UpperCAmelCase =bertabert.config.encoder.vocab_size
_UpperCAmelCase =tokenizer.sep_token_id
_UpperCAmelCase =tokenizer.cls_token_id
_UpperCAmelCase =128
_UpperCAmelCase =datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
_UpperCAmelCase =datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
_UpperCAmelCase =train_dataset.select(range(32 ) )
_UpperCAmelCase =val_dataset.select(range(16 ) )
_UpperCAmelCase =4
def _map_to_encoder_decoder_inputs(_snake_case ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_UpperCAmelCase =tokenizer(batch["article"] , padding="max_length" , truncation=_snake_case , max_length=512 )
_UpperCAmelCase =tokenizer(batch["highlights"] , padding="max_length" , truncation=_snake_case , max_length=128 )
_UpperCAmelCase =inputs.input_ids
_UpperCAmelCase =inputs.attention_mask
_UpperCAmelCase =outputs.input_ids
_UpperCAmelCase =outputs.input_ids.copy()
_UpperCAmelCase =[
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
_UpperCAmelCase =outputs.attention_mask
assert all(len(_snake_case ) == 512 for x in inputs.input_ids )
assert all(len(_snake_case ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_snake_case ):
_UpperCAmelCase =pred.label_ids
_UpperCAmelCase =pred.predictions
# all unnecessary tokens are removed
_UpperCAmelCase =tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
_UpperCAmelCase =tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
_UpperCAmelCase =sum([int(pred_str[i] == label_str[i] ) for i in range(len(_snake_case ) )] ) / len(_snake_case )
return {"accuracy": accuracy}
# map train dataset
_UpperCAmelCase =train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=["article", "highlights"] , )
train_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
# same for validation dataset
_UpperCAmelCase =val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=["article", "highlights"] , )
val_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
_UpperCAmelCase =self.get_auto_remove_tmp_dir()
_UpperCAmelCase =SeqaSeqTrainingArguments(
output_dir=_snake_case , per_device_train_batch_size=_snake_case , per_device_eval_batch_size=_snake_case , predict_with_generate=_snake_case , evaluation_strategy="steps" , do_train=_snake_case , do_eval=_snake_case , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
_UpperCAmelCase =SeqaSeqTrainer(
model=_snake_case , args=_snake_case , compute_metrics=_compute_metrics , train_dataset=_snake_case , eval_dataset=_snake_case , tokenizer=_snake_case , )
# start training
trainer.train()
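# The label-masking idiom the mapping function above applies: pad token ids
# are replaced with -100 so the cross-entropy loss ignores those positions
# (values are illustrative).
pad_token_id = 0
labels = [[5, 17, 42, 0, 0]]
masked = [[-100 if tok == pad_token_id else tok for tok in seq] for seq in labels]
assert masked == [[5, 17, 42, -100, -100]]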
| 592 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :int = 3_8_4
__magic_name__ :Tuple = 7
if "tiny" in model_name:
__magic_name__ :Dict = 9_6
__magic_name__ :List[str] = (2, 2, 6, 2)
__magic_name__ :List[str] = (3, 6, 1_2, 2_4)
elif "small" in model_name:
__magic_name__ :Dict = 9_6
__magic_name__ :Dict = (2, 2, 1_8, 2)
__magic_name__ :Any = (3, 6, 1_2, 2_4)
elif "base" in model_name:
__magic_name__ :Any = 1_2_8
__magic_name__ :Dict = (2, 2, 1_8, 2)
__magic_name__ :Optional[int] = (4, 8, 1_6, 3_2)
__magic_name__ :List[Any] = 1_2
__magic_name__ :Union[str, Any] = 5_1_2
elif "large" in model_name:
__magic_name__ :List[str] = 1_9_2
__magic_name__ :int = (2, 2, 1_8, 2)
__magic_name__ :Tuple = (6, 1_2, 2_4, 4_8)
__magic_name__ :Union[str, Any] = 1_2
__magic_name__ :Tuple = 7_6_8
# set label information
__magic_name__ :Tuple = 1_5_0
__magic_name__ :Union[str, Any] = '''huggingface/label-files'''
__magic_name__ :List[Any] = '''ade20k-id2label.json'''
__magic_name__ :Tuple = json.load(open(hf_hub_download(snake_case, snake_case, repo_type='''dataset''' ), '''r''' ) )
__magic_name__ :Tuple = {int(snake_case ): v for k, v in idalabel.items()}
__magic_name__ :Dict = {v: k for k, v in idalabel.items()}
__magic_name__ :Any = SwinConfig(
embed_dim=snake_case, depths=snake_case, num_heads=snake_case, window_size=snake_case, out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''], )
__magic_name__ :Any = UperNetConfig(
backbone_config=snake_case, auxiliary_in_channels=snake_case, num_labels=snake_case, idalabel=snake_case, labelaid=snake_case, )
return config
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Optional[int] = dct.pop(snake_case )
__magic_name__ :Optional[int] = val
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Any = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__magic_name__ :List[str] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__magic_name__ :Dict = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
__magic_name__ :Dict = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__magic_name__ :Optional[int] = in_proj_weight[:dim, :]
__magic_name__ :Optional[Any] = in_proj_bias[: dim]
__magic_name__ :int = in_proj_weight[
dim : dim * 2, :
]
__magic_name__ :List[str] = in_proj_bias[
dim : dim * 2
]
__magic_name__ :Dict = in_proj_weight[
-dim :, :
]
__magic_name__ :Any = in_proj_bias[-dim :]
# fmt: on
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Tuple = x.shape
__magic_name__ :int = x.reshape(snake_case, 4, in_channel // 4 )
__magic_name__ :str = x[:, [0, 2, 1, 3], :].transpose(1, 2 ).reshape(snake_case, snake_case )
return x
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ , __magic_name__ :List[Any] = x.shape
__magic_name__ :Optional[int] = x.reshape(snake_case, in_channel // 4, 4 )
__magic_name__ :Tuple = x[:, :, [0, 2, 1, 3]].transpose(1, 2 ).reshape(snake_case, snake_case )
return x
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Dict = x.shape[0]
__magic_name__ :Dict = x.reshape(4, in_channel // 4 )
__magic_name__ :List[str] = x[[0, 2, 1, 3], :].transpose(0, 1 ).reshape(snake_case )
return x
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = x.shape[0]
__magic_name__ :Dict = x.reshape(in_channel // 4, 4 )
__magic_name__ :Optional[Any] = x[:, [0, 2, 1, 3]].transpose(0, 1 ).reshape(snake_case )
return x
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = {
'''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''',
'''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''',
'''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''',
'''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''',
}
__magic_name__ :Dict = model_name_to_url[model_name]
__magic_name__ :Dict = torch.hub.load_state_dict_from_url(snake_case, map_location='''cpu''', file_name=snake_case )[
'''state_dict'''
]
for name, param in state_dict.items():
print(snake_case, param.shape )
__magic_name__ :Any = get_upernet_config(snake_case )
__magic_name__ :str = UperNetForSemanticSegmentation(snake_case )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__magic_name__ :Optional[Any] = state_dict.pop(snake_case )
if "bn" in key:
__magic_name__ :Tuple = key.replace('''bn''', '''batch_norm''' )
__magic_name__ :List[str] = val
# rename keys
__magic_name__ :Union[str, Any] = create_rename_keys(snake_case )
for src, dest in rename_keys:
rename_key(snake_case, snake_case, snake_case )
read_in_q_k_v(snake_case, config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
__magic_name__ :Dict = reverse_correct_unfold_reduction_order(snake_case )
if "norm" in key:
__magic_name__ :Any = reverse_correct_unfold_norm_order(snake_case )
model.load_state_dict(snake_case )
# verify on image
__magic_name__ :str = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
__magic_name__ :Any = Image.open(requests.get(snake_case, stream=snake_case ).raw ).convert('''RGB''' )
__magic_name__ :List[Any] = SegformerImageProcessor()
__magic_name__ :str = processor(snake_case, return_tensors='''pt''' ).pixel_values
with torch.no_grad():
__magic_name__ :List[Any] = model(snake_case )
__magic_name__ :List[Any] = outputs.logits
print(logits.shape )
print('''First values of logits:''', logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
__magic_name__ :List[Any] = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
__magic_name__ :List[Any] = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
__magic_name__ :Dict = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
__magic_name__ :Union[str, Any] = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print('''Logits:''', outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3], snake_case, atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(snake_case )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[f"upernet-swin-{size}" for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
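# Minimal sketch of the checkpoint-remapping idiom the conversion script
# above relies on: pop each old key and reinsert the tensor under the new
# name (the keys below are illustrative).
import torch

state = {"backbone.patch_embed.norm.weight": torch.ones(3)}
renames = [("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight")]
for src, dest in renames:
    state[dest] = state.pop(src)
assert "backbone.embeddings.norm.weight" in state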
| 0 |
'''simple docstring'''
import os
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( ):
from torch.utils.cpp_extension import load
    root = Path(__file__ ).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
    src_files = [
        root / filename
        for filename in [
            'vision.cpp',
            os.path.join('cpu' , 'ms_deform_attn_cpu.cpp' ),
            os.path.join('cuda' , 'ms_deform_attn_cuda.cu' ),
        ]
    ]
    load(
        'MultiScaleDeformableAttention' , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=['-DWITH_CUDA=1'] , extra_cuda_cflags=[
'-DCUDA_HAS_FP16=1',
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
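# Hedged sketch of the same JIT-compiled-extension idea with a tiny CPU-only
# op via torch.utils.cpp_extension.load_inline; requires a C++ toolchain at
# runtime, and the op name and source are illustrative.
import torch
from torch.utils.cpp_extension import load_inline

cpp_src = """
#include <torch/extension.h>
torch::Tensor double_it(torch::Tensor x) { return x * 2; }
"""

ext = load_inline(name="double_it_ext", cpp_sources=cpp_src, functions=["double_it"])
assert torch.equal(ext.double_it(torch.ones(3)), torch.full((3,), 2.0))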
| 107 | 0 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
SCREAMING_SNAKE_CASE = """0.12""" # assumed parallelism: 8
@require_flax
@is_staging_test
class __a ( unittest.TestCase ):
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple )-> int:
"""simple docstring"""
UpperCamelCase = TOKEN
HfFolder.save_token(lowerCAmelCase_ )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any )-> Optional[int]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="test-model-flax" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org" )
except HTTPError:
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any] )-> Any:
"""simple docstring"""
UpperCamelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCamelCase = FlaxBertModel(lowerCAmelCase_ )
model.push_to_hub("test-model-flax" , use_auth_token=self._token )
UpperCamelCase = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax" )
UpperCamelCase = flatten_dict(unfreeze(model.params ) )
UpperCamelCase = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCamelCase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase_ , 1e-3 , msg=f"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id="test-model-flax" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCAmelCase_ , repo_id="test-model-flax" , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token )
UpperCamelCase = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax" )
UpperCamelCase = flatten_dict(unfreeze(model.params ) )
UpperCamelCase = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCamelCase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase_ , 1e-3 , msg=f"{key} not identical" )
def _SCREAMING_SNAKE_CASE ( self : List[str] )-> List[str]:
"""simple docstring"""
UpperCamelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCamelCase = FlaxBertModel(lowerCAmelCase_ )
model.push_to_hub("valid_org/test-model-flax-org" , use_auth_token=self._token )
UpperCamelCase = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
UpperCamelCase = flatten_dict(unfreeze(model.params ) )
UpperCamelCase = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCamelCase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase_ , 1e-3 , msg=f"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-model-flax-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
lowerCAmelCase_ , repo_id="valid_org/test-model-flax-org" , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token )
UpperCamelCase = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
UpperCamelCase = flatten_dict(unfreeze(model.params ) )
UpperCamelCase = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCamelCase = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase_ , 1e-3 , msg=f"{key} not identical" )
def check_models_equal( model_1 , model_2 ) -> bool:
    """simple docstring"""
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params )
    flat_params_2 = flatten_dict(model_2.params )
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key] ) ) > 1E-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class __a ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> int:
"""simple docstring"""
UpperCamelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
UpperCamelCase = FlaxBertModel(lowerCAmelCase_ )
UpperCamelCase = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) )
with self.assertRaises(lowerCAmelCase_ ):
UpperCamelCase = FlaxBertModel.from_pretrained(lowerCAmelCase_ )
UpperCamelCase = FlaxBertModel.from_pretrained(lowerCAmelCase_ , subfolder=lowerCAmelCase_ )
self.assertTrue(check_models_equal(lowerCAmelCase_ , lowerCAmelCase_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] )-> Any:
"""simple docstring"""
UpperCamelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
UpperCamelCase = FlaxBertModel(lowerCAmelCase_ )
UpperCamelCase = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , max_shard_size="10KB" )
with self.assertRaises(lowerCAmelCase_ ):
UpperCamelCase = FlaxBertModel.from_pretrained(lowerCAmelCase_ )
UpperCamelCase = FlaxBertModel.from_pretrained(lowerCAmelCase_ , subfolder=lowerCAmelCase_ )
self.assertTrue(check_models_equal(lowerCAmelCase_ , lowerCAmelCase_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] )-> int:
"""simple docstring"""
UpperCamelCase = "bert"
UpperCamelCase = "hf-internal-testing/tiny-random-bert-subfolder"
with self.assertRaises(lowerCAmelCase_ ):
UpperCamelCase = FlaxBertModel.from_pretrained(lowerCAmelCase_ )
UpperCamelCase = FlaxBertModel.from_pretrained(lowerCAmelCase_ , subfolder=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str )-> Dict:
"""simple docstring"""
UpperCamelCase = "bert"
UpperCamelCase = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
with self.assertRaises(lowerCAmelCase_ ):
UpperCamelCase = FlaxBertModel.from_pretrained(lowerCAmelCase_ )
UpperCamelCase = FlaxBertModel.from_pretrained(lowerCAmelCase_ , subfolder=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
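# A minimal sketch of the subfolder behaviour the tests above exercise, assuming
# flax is installed and the tiny checkpoint is reachable: loading the directory
# root fails when the weights live in a subfolder, while subfolder="bert" works.
def _subfolder_roundtrip_sketch():
    import os
    import tempfile

    from transformers import BertConfig, FlaxBertModel

    config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
    model = FlaxBertModel(config)
    with tempfile.TemporaryDirectory() as tmp_dir:
        model.save_pretrained(os.path.join(tmp_dir, "bert"))
        try:
            FlaxBertModel.from_pretrained(tmp_dir)  # raises: no weights at the root
        except OSError:
            pass
        return FlaxBertModel.from_pretrained(tmp_dir, subfolder="bert")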
| 717 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
def _SCREAMING_SNAKE_CASE ( self : Tuple )-> Optional[int]:
"""simple docstring"""
super().setUp()
vocab_tokens = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , **UpperCAmelCase_ : Optional[Any] )-> str:
"""simple docstring"""
return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict , **UpperCAmelCase_ : Any )-> Union[str, Any]:
"""simple docstring"""
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase_ : Tuple )-> str:
"""simple docstring"""
UpperCamelCase = "UNwant\u00E9d,running"
UpperCamelCase = "unwanted, running"
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : str )-> List[str]:
"""simple docstring"""
tokenizer = self.tokenizer_class(self.vocab_file)
tokens = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
def _SCREAMING_SNAKE_CASE ( self : List[Any] )-> Optional[int]:
"""simple docstring"""
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
    inputs = tokenizer("UNwant\u00E9d,running")
    sentence_len = len(inputs["input_ids"]) - 1
    self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)
    inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
    self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| 556 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
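# The shim above keeps old imports working while steering users to the new name.
# A small, hedged sketch of what instantiation does (no network access needed):
def _deprecation_warning_sketch():
    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        _ = PoolFormerFeatureExtractor()  # behaves exactly like PoolFormerImageProcessor
    return [str(w.message) for w in caught]  # contains the deprecation notice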
| 143 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_instructblip"] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 143 | 1 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "segformer"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=256, semantic_loss_ignore_index=255, **kwargs, ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
                ' removed, as the behaviour will default to that of reshape_last_stage = True.', FutureWarning, )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('reshape_last_stage', True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 12
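# A hedged usage sketch of the two classes above: build a config with its
# MiT-b0-style defaults and read off the ONNX export contract.
def _segformer_onnx_sketch():
    config = SegformerConfig()  # 4 encoder blocks, hidden sizes [32, 64, 160, 256]
    onnx_config = SegformerOnnxConfig(config)
    assert "pixel_values" in onnx_config.inputs
    assert onnx_config.atol_for_validation == 1e-4
    assert onnx_config.default_onnx_opset == 12
    return onnx_config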
| 110 |
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace('transformer_layers', 'layers')] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace('subsample', 'conv')] = s_dict.pop(key)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    mam_aaa = torch.load(checkpoint_path, map_location='cpu')
    args = mam_aaa['args']
    state_dict = mam_aaa['model']
    lm_head_weights = state_dict['decoder.output_projection.weight']
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    vocab_size = state_dict['decoder.embed_tokens.weight'].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(',')]
    config = Speech2TextConfig(
        vocab_size=vocab_size, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='relu', num_conv_layers=len(conv_kernel_sizes), conv_channels=args.conv_channels, conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=2_00, use_cache=True, decoder_start_token_id=2, early_stopping=True, )
    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f' but all the following weights are missing {missing}' )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
args = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
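# Example invocation (script filename and paths are placeholders):
#   python convert_s2t_fairseq_checkpoint.py \
#       --fairseq_path /path/to/checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output_dir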
| 110 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
snake_case : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 566 |
'''simple docstring'''
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    '''simple docstring'''
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
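# A few worked conversions, consistent with the exhaustive self-test above:
#   decimal_to_any(255, 16) -> 'FF'    (255 = 15 * 16 + 15, and 15 maps to 'F')
#   decimal_to_any(10, 2)   -> '1010'
#   decimal_to_any(35, 36)  -> 'Z'     (ord('Z') - 55 == 35)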
| 566 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_lilt"] = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
A_ :List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 714 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
A_ :Optional[int] = logging.get_logger()
@dataclass
class Tracker:
    """simple docstring"""
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs, outputs):
        """simple docstring"""
        # Record only leaf modules (convs, batch norms, modules with no children).
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x):
        """simple docstring"""
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [h.remove() for h in self.handles]
        return self

    @property
    def parametrized(self):
        """simple docstring"""
        # Keep only traced leaves that actually hold parameters.
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    """simple docstring"""
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x):
        """simple docstring"""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced):
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced)} operations while'
                f' destination module has {len(dest_traced)}.' )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f'Transfered from={src_m} to={dest_m}')
def convert_weight_and_push(name, config, save_directory, push_to_hub=True):
    print(F'Converting {name}...')
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."
    checkpoint_name = F'resnet{"-".join(name.split("resnet"))}'
    print(checkpoint_name)
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add model', use_temp_dir=True, )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add image processor', use_temp_dir=True, )
        print(F'Pushed {checkpoint_name}')
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1_000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
        'resnet18': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type='basic'),
        'resnet26': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1_024, 2_048], layer_type='bottleneck'),
        'resnet34': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type='basic'),
        'resnet50': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1_024, 2_048], layer_type='bottleneck'),
        'resnet101': ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1_024, 2_048], layer_type='bottleneck'),
        'resnet152': ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1_024, 2_048], layer_type='bottleneck'),
    }
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
A_ :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
args = parser.parse_args()
pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
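# Example invocations (script filename and paths are placeholders):
#   python convert_resnet_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path ./converted
#   python convert_resnet_to_pytorch.py --pytorch_dump_folder_path ./converted   # converts all six variants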
| 154 | 0 |
"""simple docstring"""
class OverFlowError(Exception):
    pass
class UnderFlowError(Exception):
    pass
class FixedPriorityQueue:
    def __init__(self) -> None:
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)
def _SCREAMING_SNAKE_CASE ( ) ->str:
'''simple docstring'''
fpq = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def _SCREAMING_SNAKE_CASE ( ) ->Any:
'''simple docstring'''
epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
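# Note on the demos above: each one enqueues nine items but calls dequeue() ten
# times, so the final dequeue intentionally raises UnderFlowError once the queue
# is drained. Expected drain orders:
#   fixed-priority demo:  10, 100, 128, 70, 7, 64, 1, 5, 4   (priority 0 first)
#   element demo:          1, 4, 5, 7, 10, 64, 70, 100, 128  (smallest value first)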
| 633 |
"""simple docstring"""
def average_absolute_deviation(nums: list[int]) -> float:
    '''simple docstring'''
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
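# Worked example: for [1, 2, 3, 4] the average is 2.5, so the average absolute
# deviation is (1.5 + 0.5 + 0.5 + 1.5) / 4 = 1.0.
assert average_absolute_deviation([1, 2, 3, 4]) == 1.0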
| 633 | 1 |
'''simple docstring'''
import argparse
lowerCAmelCase__ = "docs/source/_static/js/custom.js"
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> List[Any]:
with open(UpperCamelCase ,encoding='utf-8' ,newline='\n' ) as f:
UpperCAmelCase_ : Dict = f.readlines()
UpperCAmelCase_ : Dict = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
UpperCAmelCase_ : Tuple = f"""const stableVersion = \"v{version}\"\n"""
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += f""" \"v{version}\": \"v{version}\",\n"""
with open(UpperCamelCase ,'w' ,encoding='utf-8' ,newline='\n' ) as f:
f.writelines(UpperCamelCase )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
args = parser.parse_args()
update_custom_js(args.version)
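# Example invocation during a release (script path assumed):
#   python utils/update_custom_js.py --version 4.30.0
# This rewrites the `const stableVersion = ...` line and appends
#   "v4.30.0": "v4.30.0",
# to the versionMapping dictionary in custom.js.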
| 471 |
'''simple docstring'''
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('aer_simulator')
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1_0_0_0)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
counts = half_adder(1, 1)
print(F'Half Adder Output Qubit Counts: {counts}')
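# For inputs (1, 1): sum = 1 XOR 1 = 0 and carry = 1 AND 1 = 1, so every shot
# measures (carry, sum) = (1, 0) and the histogram concentrates on the
# bitstring '10', i.e. counts == {'10': 1000} on a noiseless simulator.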
| 471 | 1 |