code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from math import pi, sqrt, tan
def _A ( lowerCAmelCase_ : float ):
"""simple docstring"""
if side_length < 0:
raise ValueError("surface_area_cube() only accepts non-negative values" )
return 6 * side_length**2
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if length < 0 or breadth < 0 or height < 0:
raise ValueError("surface_area_cuboid() only accepts non-negative values" )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def _A ( lowerCAmelCase_ : float ):
"""simple docstring"""
if radius < 0:
raise ValueError("surface_area_sphere() only accepts non-negative values" )
return 4 * pi * radius**2
def _A ( lowerCAmelCase_ : float ):
"""simple docstring"""
if radius < 0:
raise ValueError("surface_area_hemisphere() only accepts non-negative values" )
return 3 * pi * radius**2
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError("surface_area_cone() only accepts non-negative values" )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
"surface_area_conical_frustum() only accepts non-negative values" )
lowerCAmelCase__ = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError("surface_area_cylinder() only accepts non-negative values" )
return 2 * pi * radius * (height + radius)
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if torus_radius < 0 or tube_radius < 0:
raise ValueError("surface_area_torus() only accepts non-negative values" )
if torus_radius < tube_radius:
raise ValueError(
"surface_area_torus() does not support spindle or self intersecting tori" )
return 4 * pow(lowerCAmelCase_ , 2 ) * torus_radius * tube_radius
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if length < 0 or width < 0:
raise ValueError("area_rectangle() only accepts non-negative values" )
return length * width
def _A ( lowerCAmelCase_ : float ):
"""simple docstring"""
if side_length < 0:
raise ValueError("area_square() only accepts non-negative values" )
return side_length**2
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError("area_triangle() only accepts non-negative values" )
return (base * height) / 2
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError("area_triangle_three_sides() only accepts non-negative values" )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError("Given three sides do not form a triangle" )
lowerCAmelCase__ = (sidea + sidea + sidea) / 2
lowerCAmelCase__ = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError("area_parallelogram() only accepts non-negative values" )
return base * height
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if basea < 0 or basea < 0 or height < 0:
raise ValueError("area_trapezium() only accepts non-negative values" )
return 1 / 2 * (basea + basea) * height
def _A ( lowerCAmelCase_ : float ):
"""simple docstring"""
if radius < 0:
raise ValueError("area_circle() only accepts non-negative values" )
return pi * radius**2
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if radius_x < 0 or radius_y < 0:
raise ValueError("area_ellipse() only accepts non-negative values" )
return pi * radius_x * radius_y
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
"""simple docstring"""
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError("area_rhombus() only accepts non-negative values" )
return 1 / 2 * diagonal_a * diagonal_a
def _A ( lowerCAmelCase_ : int , lowerCAmelCase_ : float ):
"""simple docstring"""
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or sides < 3:
raise ValueError(
"area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides" )
elif length < 0:
raise ValueError(
"area_reg_polygon() only accepts non-negative values as \
length of a side" )
return (sides * length**2) / (4 * tan(pi / sides ))
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F"""Rectangle: {area_rectangle(10, 20) = }""")
print(F"""Square: {area_square(10) = }""")
print(F"""Triangle: {area_triangle(10, 10) = }""")
print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(F"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(F"""Rhombus: {area_rhombus(10, 20) = }""")
print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(F"""Circle: {area_circle(20) = }""")
print(F"""Ellipse: {area_ellipse(10, 20) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(F"""Cube: {surface_area_cube(20) = }""")
print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(F"""Sphere: {surface_area_sphere(20) = }""")
print(F"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(F"""Cone: {surface_area_cone(10, 20) = }""")
print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(F"""Torus: {surface_area_torus(20, 10) = }""")
print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(F"""Square: {area_reg_polygon(4, 10) = }""")
print(F"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 61 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __lowerCamelCase :
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple=13 , SCREAMING_SNAKE_CASE__ : Optional[Any]=10 , SCREAMING_SNAKE_CASE__ : Optional[int]=3 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any=32 , SCREAMING_SNAKE_CASE__ : Optional[int]=5 , SCREAMING_SNAKE_CASE__ : List[Any]=4 , SCREAMING_SNAKE_CASE__ : List[Any]=37 , SCREAMING_SNAKE_CASE__ : int="gelu" , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Any=10 , SCREAMING_SNAKE_CASE__ : int=0.02 , SCREAMING_SNAKE_CASE__ : Tuple="divided_space_time" , SCREAMING_SNAKE_CASE__ : Optional[int]=None , ) -> List[str]:
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_frames
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = attention_type
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = scope
lowerCAmelCase__ = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
lowerCAmelCase__ = (image_size // patch_size) ** 2
lowerCAmelCase__ = (num_frames) * self.num_patches_per_frame + 1
def a ( self : int ) -> Tuple:
lowerCAmelCase__ = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def a ( self : List[Any] ) -> Any:
lowerCAmelCase__ = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
lowerCAmelCase__ = self.num_labels
return config
def a ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
lowerCAmelCase__ = TimesformerModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ) -> Tuple:
lowerCAmelCase__ = TimesformerForVideoClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
# verify the logits shape
lowerCAmelCase__ = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , SCREAMING_SNAKE_CASE__ )
def a ( self : Tuple ) -> Dict:
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
snake_case__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
snake_case__ = (
{"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def a ( self : List[str] ) -> List[Any]:
lowerCAmelCase__ = TimesformerModelTester(self )
lowerCAmelCase__ = ConfigTester(
self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def a ( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple=False ) -> str:
lowerCAmelCase__ = copy.deepcopy(SCREAMING_SNAKE_CASE__ )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ )
return inputs_dict
def a ( self : Optional[Any] ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def a ( self : Union[str, Any] ) -> Tuple:
pass
def a ( self : Dict ) -> List[str]:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) )
def a ( self : int ) -> Optional[Any]:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def a ( self : int ) -> Optional[Any]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def a ( self : Optional[Any] ) -> Tuple:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def a ( self : str ) -> Tuple:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TimesformerModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def a ( self : int ) -> Dict:
if not self.has_attentions:
pass
else:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = True
for model_class in self.all_model_classes:
lowerCAmelCase__ = self.model_tester.seq_length
lowerCAmelCase__ = self.model_tester.num_frames
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase__ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase__ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
lowerCAmelCase__ = len(SCREAMING_SNAKE_CASE__ )
# Check attention is always last and order is fine
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(out_len + 1 , len(SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase__ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def a ( self : List[str] ) -> Any:
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] ):
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase__ = outputs.hidden_states
lowerCAmelCase__ = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _A ( ):
"""simple docstring"""
lowerCAmelCase__ = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
lowerCAmelCase__ = np.load(lowerCAmelCase_ )
return list(lowerCAmelCase_ )
@require_torch
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def a ( self : Optional[Any] ) -> Union[str, Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def a ( self : Optional[Any] ) -> str:
lowerCAmelCase__ = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_video()
lowerCAmelCase__ = image_processor(video[:8] , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
lowerCAmelCase__ = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
| 61 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
a_ :int
a_ :int
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a = [[] for _ in range(SCREAMING_SNAKE_CASE__ )]
__a = size
def __getitem__( self : List[Any] , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def __a ( self : List[str] ):
'''simple docstring'''
return self._size
def __a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError("""Edge weight must be either 0 or 1.""" )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("""Vertex indexes must be in [0; size).""" )
self._graph[from_vertex].append(Edge(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
def __a ( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a = deque([start_vertex] )
__a = [None] * self.size
__a = 0
while queue:
__a = queue.popleft()
__a = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
__a = current_distance + edge.weight
__a = distances[edge.destination_vertex]
if (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and new_distance >= dest_vertex_distance
):
continue
__a = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("""No path from start_vertex to finish_vertex.""" )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 201 |
'''simple docstring'''
from math import pi, sqrt
def __lowercase ( __SCREAMING_SNAKE_CASE ) -> float:
"""simple docstring"""
if num <= 0:
raise ValueError("""math domain error""" )
if num > 171.5:
raise OverflowError("""math range error""" )
elif num - int(__SCREAMING_SNAKE_CASE ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
return sqrt(__SCREAMING_SNAKE_CASE )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def __lowercase ( ) -> None:
"""simple docstring"""
assert gamma(0.5 ) == sqrt(__SCREAMING_SNAKE_CASE )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
SCREAMING_SNAKE_CASE_ = 1.0
while num:
SCREAMING_SNAKE_CASE_ = float(input('Gamma of: '))
print(f"""gamma({num}) = {gamma(num)}""")
print('\nEnter 0 to exit...')
| 201 | 1 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 282 |
import logging
from transformers import PretrainedConfig
_snake_case = logging.getLogger(__name__)
_snake_case = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class _lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int ="bertabs"
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_05_22 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=5_12 , SCREAMING_SNAKE_CASE__ : List[Any]=6 , SCREAMING_SNAKE_CASE__ : List[str]=5_12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=8 , SCREAMING_SNAKE_CASE__ : List[str]=5_12 , SCREAMING_SNAKE_CASE__ : List[Any]=0.2 , SCREAMING_SNAKE_CASE__ : Tuple=6 , SCREAMING_SNAKE_CASE__ : Any=7_68 , SCREAMING_SNAKE_CASE__ : str=8 , SCREAMING_SNAKE_CASE__ : Dict=20_48 , SCREAMING_SNAKE_CASE__ : List[str]=0.2 , **SCREAMING_SNAKE_CASE__ : List[str] , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase = vocab_size
UpperCamelCase = max_pos
UpperCamelCase = enc_layers
UpperCamelCase = enc_hidden_size
UpperCamelCase = enc_heads
UpperCamelCase = enc_ff_size
UpperCamelCase = enc_dropout
UpperCamelCase = dec_layers
UpperCamelCase = dec_hidden_size
UpperCamelCase = dec_heads
UpperCamelCase = dec_ff_size
UpperCamelCase = dec_dropout
| 282 | 1 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class snake_case :
"""simple docstring"""
@staticmethod
def snake_case__ ( *lowerCAmelCase_ , **lowerCAmelCase_ ):
pass
@is_pipeline_test
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
__lowercase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
__lowercase = [
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
__lowercase = vqa_pipeline(lowerCAmelCase_ , top_k=1 )
self.assertEqual(
lowerCAmelCase_ , [
[{"score": ANY(lowerCAmelCase_ ), "answer": ANY(lowerCAmelCase_ )}],
[{"score": ANY(lowerCAmelCase_ ), "answer": ANY(lowerCAmelCase_ )}],
] , )
@require_torch
def snake_case__ ( self ):
__lowercase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
__lowercase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
__lowercase = "How many cats are there?"
__lowercase = vqa_pipeline(image=lowerCAmelCase_ , question="How many cats are there?" , top_k=2 )
self.assertEqual(
lowerCAmelCase_ , [{"score": ANY(lowerCAmelCase_ ), "answer": ANY(lowerCAmelCase_ )}, {"score": ANY(lowerCAmelCase_ ), "answer": ANY(lowerCAmelCase_ )}] )
__lowercase = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
lowerCAmelCase_ , [{"score": ANY(lowerCAmelCase_ ), "answer": ANY(lowerCAmelCase_ )}, {"score": ANY(lowerCAmelCase_ ), "answer": ANY(lowerCAmelCase_ )}] )
@slow
@require_torch
def snake_case__ ( self ):
__lowercase = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
__lowercase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
__lowercase = "How many cats are there?"
__lowercase = vqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}] )
__lowercase = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}] )
__lowercase = vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [[{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def snake_case__ ( self ):
pass
| 719 | from __future__ import annotations
def __lowercase ( _UpperCAmelCase , _UpperCAmelCase ) -> int:
'''simple docstring'''
if len(_UpperCAmelCase ) < k or k < 0:
raise ValueError("Invalid Input" )
__lowercase = __lowercase = sum(array[:k] )
for i in range(len(_UpperCAmelCase ) - k ):
__lowercase = current_sum - array[i] + array[i + k]
__lowercase = max(_UpperCAmelCase , _UpperCAmelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
lowerCAmelCase__ = [randint(-1_000, 1_000) for i in range(100)]
lowerCAmelCase__ = randint(0, 110)
print(F"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
| 576 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCamelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Tuple = IFInpaintingPipeline
__UpperCamelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
__UpperCamelCase : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__UpperCamelCase : Dict = PipelineTesterMixin.required_optional_params - {"""latents"""}
def lowerCAmelCase__ ( self : List[Any] ):
return self._get_dummy_components()
def lowerCAmelCase__ ( self : Tuple , snake_case_ : Any , snake_case_ : Any=0 ):
if str(_A ).startswith("""mps""" ):
UpperCamelCase_: Optional[Any] = torch.manual_seed(_A )
else:
UpperCamelCase_: Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
UpperCamelCase_: str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCamelCase_: Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCamelCase_: Dict = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase__ ( self : Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowerCAmelCase__ ( self : Any ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase__ ( self : int ):
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowerCAmelCase__ ( self : Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCAmelCase__ ( self : Tuple ):
self._test_save_load_local()
def lowerCAmelCase__ ( self : Optional[Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 548 |
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Dict = sorted(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , key=lambda SCREAMING_SNAKE_CASE : x[0] / x[1] , reverse=SCREAMING_SNAKE_CASE )
UpperCamelCase , UpperCamelCase : int = [i[0] for i in r], [i[1] for i in r]
UpperCamelCase : Optional[Any] = list(accumulate(SCREAMING_SNAKE_CASE ) )
UpperCamelCase : Optional[Any] = bisect(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102 | 0 |
"""simple docstring"""
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ):
lowercase_ : Optional[Any] = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
lowercase_ : str = set()
return any(
node not in visited and depth_first_search(A_ , A_ , A_ , A_ )
for node in graph )
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] ):
visited.add(A_ )
rec_stk.add(A_ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(A_ , A_ , A_ , A_ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(A_ )
return False
if __name__ == "__main__":
    from doctest import testmod

    # Run this module's doctests when executed as a script.
    testmod()
| 710 | """simple docstring"""
__SCREAMING_SNAKE_CASE ="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def lowercase__( data: bytes ) -> bytes:
    """Encode *data* to Base64 and return the encoded bytes.

    :param data: a bytes-like object to encode.
    :raises TypeError: if *data* is not a ``bytes`` object.
    """
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data , bytes ):
        message = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(message )

    # Concatenate the 8-bit binary representation of every input byte.
    binary_stream = "".join(bin(byte )[2:].zfill(8 ) for byte in data )

    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
        + padding
    )
def lowercase__( encoded_data: str ) -> bytes:
    """Decode Base64-encoded *encoded_data* and return the raw bytes.

    :param encoded_data: a Base64 string or ASCII bytes-like object.
    :raises TypeError: if the argument is neither ``bytes`` nor ``str``.
    :raises ValueError: if a bytes argument is not pure ASCII.
    """
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data , bytes ) and not isinstance(encoded_data , str ):
        message = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(message )

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data , bytes ):
        try:
            encoded_data = encoded_data.decode('utf-8' )
        except UnicodeDecodeError:
            raise ValueError('base64 encoded data should only contain ASCII characters' )

    padding = encoded_data.count('=' )

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        # Each '=' pad hides two filler bits appended during encoding.
        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )

    # Regroup the bit stream into 8-bit bytes.
    decoded = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(decoded )
if __name__ == "__main__":
    import doctest

    # Run this module's doctests when executed as a script.
    doctest.testmod()
| 477 | 0 |
'''Strongly connected components via Kosaraju's algorithm — example graphs.'''
# Directed test graph; its SCCs are {0, 1, 2}, {3} and {4}.
# NOTE(review): both graphs below bind the SAME (mangled) name, so the first
# assignment is shadowed — originally these were two distinct test graphs.
SCREAMING_SNAKE_CASE_ = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
# Directed test graph; its SCCs are {0, 1, 2} and {3, 4, 5}.
SCREAMING_SNAKE_CASE_ = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def UpperCamelCase__ ( graph: dict[int, list[int]] , vert: int , visited: list[bool] ) -> list[int]:
    """DFS post-order pass of Kosaraju's algorithm.

    :param graph: adjacency mapping ``{vertex: [neighbours, ...]}``.
    :param vert: vertex to start from.
    :param visited: mutable visited flags, indexed by vertex (updated in place).
    :return: vertices reachable from *vert*, in increasing finish-time order.
    """
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += UpperCamelCase__(graph , neighbour , visited )
    # Post-order: a vertex is emitted only after all its descendants.
    order.append(vert )
    return order
def UpperCamelCase__ ( reversed_graph: dict[int, list[int]] , vert: int , visited: list[bool] ) -> list[int]:
    """Collect one strongly connected component by DFS on the reversed graph.

    :param reversed_graph: adjacency mapping with all edges reversed.
    :param vert: component root to start from.
    :param visited: mutable visited flags, indexed by vertex (updated in place).
    :return: all vertices in the component containing *vert*.
    """
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += UpperCamelCase__(reversed_graph , neighbour , visited )
    return component
def UpperCamelCase__ ( graph: dict[int, list[int]] ) -> list[list[int]]:
    """Return the strongly connected components of *graph* (Kosaraju's algorithm).

    Vertices must be the integers ``0 .. len(graph) - 1``.

    :param graph: adjacency mapping ``{vertex: [neighbours, ...]}``.
    :return: list of components, each a list of vertices.
    """

    def _topology_sort(g , vert , seen ):
        # First-pass DFS: vertices in increasing order of finish time.
        seen[vert] = True
        order = []
        for neighbour in g[vert]:
            if not seen[neighbour]:
                order += _topology_sort(g , neighbour , seen )
        order.append(vert )
        return order

    def _find_components(rev , vert , seen ):
        # Second-pass DFS on the reversed graph gathers one full component.
        seen[vert] = True
        component = [vert]
        for neighbour in rev[vert]:
            if not seen[neighbour]:
                component += _find_components(rev , neighbour , seen )
        return component

    visited = len(graph ) * [False]
    # Build the transpose graph: every edge u -> v becomes v -> u.
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph ) )}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert )

    order = []
    for i, was_visited in enumerate(visited ):
        if not was_visited:
            order += _topology_sort(graph , i , visited )

    components_list = []
    visited = len(graph ) * [False]
    # Process vertices in decreasing finish time; each unvisited root starts
    # a new strongly connected component on the reversed graph.
    for i in range(len(graph ) ):
        vert = order[len(graph ) - i - 1]
        if not visited[vert]:
            component = _find_components(reversed_graph , vert , visited )
            components_list.append(component )
    return components_list
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
    """Fast, CPU-sized unit tests for ``StableDiffusionPanoramaPipeline``.

    NOTE(review): this file looks machine-renamed. The base classes are spelled
    ``__lowerCAmelCase`` (undefined here) — presumably the imported
    ``PipelineLatentTesterMixin`` / ``PipelineTesterMixin``; confirm against the
    original test module. The five class attributes below all bind the same
    name, so only the last assignment survives — originally they were distinct
    (``pipeline_class``, ``params``, ``batch_params``, image params sets).
    Inside the methods, locals are likewise all assigned to one mangled name
    while later lines read the original names (``unet``, ``generator``,
    ``sd_pipe`` …), which are therefore undefined at runtime.
    """

    __lowerCAmelCase = StableDiffusionPanoramaPipeline
    __lowerCAmelCase = TEXT_TO_IMAGE_PARAMS
    __lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
    __lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
    __lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS

    def lowercase_ ( self ):
        """Build a tiny UNet + DDIM + VAE + CLIP component dict for fast tests."""
        torch.manual_seed(0 )
        __UpperCAmelCase: List[str] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        __UpperCAmelCase: List[str] = DDIMScheduler()
        torch.manual_seed(0 )
        __UpperCAmelCase: List[str] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        __UpperCAmelCase: List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        __UpperCAmelCase: Union[str, Any] = CLIPTextModel(snake_case_ )
        __UpperCAmelCase: List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        __UpperCAmelCase: int = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    def lowercase_ ( self , snake_case_ , snake_case_=0 ):
        """Deterministic dummy pipeline kwargs.

        NOTE(review): both parameters are named ``snake_case_`` — a duplicate-
        argument SyntaxError; originally ``device`` and ``seed=0``.
        """
        __UpperCAmelCase: Dict = torch.manual_seed(snake_case_ )
        __UpperCAmelCase: Optional[Any] = {
            """prompt""": """a photo of the dolomites""",
            """generator""": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            """height""": None,
            """width""": None,
            """num_inference_steps""": 1,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs

    def lowercase_ ( self ):
        """Default end-to-end run; checks shape and a fixed image slice."""
        __UpperCAmelCase: str = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        __UpperCAmelCase: Any = self.get_dummy_components()
        __UpperCAmelCase: str = StableDiffusionPanoramaPipeline(**snake_case_ )
        __UpperCAmelCase: Tuple = sd_pipe.to(snake_case_ )
        sd_pipe.set_progress_bar_config(disable=snake_case_ )
        __UpperCAmelCase: Optional[int] = self.get_dummy_inputs(snake_case_ )
        __UpperCAmelCase: str = sd_pipe(**snake_case_ ).images
        __UpperCAmelCase: int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        __UpperCAmelCase: Dict = np.array([0.6_1_8_6, 0.5_3_7_4, 0.4_9_1_5, 0.4_1_3_5, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_7, 0.4_7_5_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def lowercase_ ( self ):
        """Batch-consistency smoke test (batch sizes 1 and 2)."""
        super().test_inference_batch_consistent(batch_sizes=[1, 2] )

    def lowercase_ ( self ):
        """Single-sample vs batched output equivalence within tolerance."""
        super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )

    def lowercase_ ( self ):
        """Run with a negative prompt; checks a fixed image slice."""
        __UpperCAmelCase: Dict = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        __UpperCAmelCase: Tuple = self.get_dummy_components()
        __UpperCAmelCase: Any = StableDiffusionPanoramaPipeline(**snake_case_ )
        __UpperCAmelCase: List[Any] = sd_pipe.to(snake_case_ )
        sd_pipe.set_progress_bar_config(disable=snake_case_ )
        __UpperCAmelCase: int = self.get_dummy_inputs(snake_case_ )
        __UpperCAmelCase: Optional[Any] = """french fries"""
        __UpperCAmelCase: str = sd_pipe(**snake_case_ , negative_prompt=snake_case_ )
        __UpperCAmelCase: Tuple = output.images
        __UpperCAmelCase: Optional[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        __UpperCAmelCase: int = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def lowercase_ ( self ):
        """Run with view_batch_size=2; panorama views batched two at a time."""
        __UpperCAmelCase: List[str] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        __UpperCAmelCase: List[str] = self.get_dummy_components()
        __UpperCAmelCase: str = StableDiffusionPanoramaPipeline(**snake_case_ )
        __UpperCAmelCase: Dict = sd_pipe.to(snake_case_ )
        sd_pipe.set_progress_bar_config(disable=snake_case_ )
        __UpperCAmelCase: str = self.get_dummy_inputs(snake_case_ )
        __UpperCAmelCase: Dict = sd_pipe(**snake_case_ , view_batch_size=2 )
        __UpperCAmelCase: Dict = output.images
        __UpperCAmelCase: List[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        __UpperCAmelCase: Optional[Any] = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def lowercase_ ( self ):
        """Run with the Euler-ancestral scheduler; checks a fixed image slice."""
        __UpperCAmelCase: List[str] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        __UpperCAmelCase: Optional[int] = self.get_dummy_components()
        __UpperCAmelCase: Tuple = EulerAncestralDiscreteScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" )
        __UpperCAmelCase: str = StableDiffusionPanoramaPipeline(**snake_case_ )
        __UpperCAmelCase: Dict = sd_pipe.to(snake_case_ )
        sd_pipe.set_progress_bar_config(disable=snake_case_ )
        __UpperCAmelCase: Dict = self.get_dummy_inputs(snake_case_ )
        __UpperCAmelCase: Tuple = sd_pipe(**snake_case_ ).images
        __UpperCAmelCase: Any = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        __UpperCAmelCase: Union[str, Any] = np.array([0.4_0_2_4, 0.6_5_1_0, 0.4_9_0_1, 0.5_3_7_8, 0.5_8_1_3, 0.5_6_2_2, 0.4_7_9_5, 0.4_4_6_7, 0.4_9_5_2] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def lowercase_ ( self ):
        """Run with the PNDM scheduler (PRK steps skipped); checks a slice."""
        __UpperCAmelCase: Dict = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        __UpperCAmelCase: Dict = self.get_dummy_components()
        __UpperCAmelCase: List[Any] = PNDMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , skip_prk_steps=snake_case_ )
        __UpperCAmelCase: List[str] = StableDiffusionPanoramaPipeline(**snake_case_ )
        __UpperCAmelCase: str = sd_pipe.to(snake_case_ )
        sd_pipe.set_progress_bar_config(disable=snake_case_ )
        __UpperCAmelCase: Optional[Any] = self.get_dummy_inputs(snake_case_ )
        __UpperCAmelCase: int = sd_pipe(**snake_case_ ).images
        __UpperCAmelCase: List[str] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        __UpperCAmelCase: Dict = np.array([0.6_3_9_1, 0.6_2_9_1, 0.4_8_6_1, 0.5_1_3_4, 0.5_5_5_2, 0.4_5_7_8, 0.5_0_3_2, 0.5_0_2_3, 0.4_5_3_9] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
    """Slow, GPU-only integration tests against ``stabilityai/stable-diffusion-2-base``.

    NOTE(review): machine-renamed file — locals are assigned to the mangled
    name ``__UpperCAmelCase`` while later lines read the original names
    (``generator``, ``inputs``, ``pipe``, ``model_ckpt`` …), and method
    arguments appear as the undefined ``snake_case_``. Confirm against the
    original diffusers test module before trusting runtime behavior.
    """

    def lowercase_ ( self ):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase_ ( self , snake_case_=0 ):
        """Standard call kwargs with a seeded torch generator."""
        __UpperCAmelCase: Any = torch.manual_seed(snake_case_ )
        __UpperCAmelCase: Tuple = {
            """prompt""": """a photo of the dolomites""",
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs

    def lowercase_ ( self ):
        """End-to-end panorama generation with DDIM; checks a 512x2048 output slice."""
        __UpperCAmelCase: str = """stabilityai/stable-diffusion-2-base"""
        __UpperCAmelCase: Optional[Any] = DDIMScheduler.from_pretrained(snake_case_ , subfolder="""scheduler""" )
        __UpperCAmelCase: List[str] = StableDiffusionPanoramaPipeline.from_pretrained(snake_case_ , scheduler=snake_case_ , safety_checker=snake_case_ )
        pipe.to(snake_case_ )
        pipe.set_progress_bar_config(disable=snake_case_ )
        pipe.enable_attention_slicing()
        __UpperCAmelCase: Tuple = self.get_inputs()
        __UpperCAmelCase: Tuple = pipe(**snake_case_ ).images
        __UpperCAmelCase: List[Any] = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        __UpperCAmelCase: int = np.array(
            [
                0.3_6_9_6_8_3_9_2,
                0.2_7_0_2_5_3_7_2,
                0.3_2_4_4_6_7_6_6,
                0.2_8_3_7_9_3_8_7,
                0.3_6_3_6_3_2_7_4,
                0.3_0_7_3_3_3_4_7,
                0.2_7_1_0_0_0_2_7,
                0.2_7_0_5_4_1_2_5,
                0.2_5_5_3_6_0_9_6,
            ] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-2

    def lowercase_ ( self ):
        """Panorama generation with the LMS scheduler; expects an all-zero slice."""
        __UpperCAmelCase: List[str] = StableDiffusionPanoramaPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2-base""" , safety_checker=snake_case_ )
        __UpperCAmelCase: int = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.to(snake_case_ )
        pipe.set_progress_bar_config(disable=snake_case_ )
        pipe.enable_attention_slicing()
        __UpperCAmelCase: str = self.get_inputs()
        __UpperCAmelCase: str = pipe(**snake_case_ ).images
        __UpperCAmelCase: Tuple = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        __UpperCAmelCase: str = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3

    def lowercase_ ( self ):
        """Checks the per-step callback: latent shapes/slices at steps 1 and 2."""
        __UpperCAmelCase: List[Any] = 0

        def callback_fn(snake_case_ , snake_case_ , snake_case_ ) -> None:
            # Invoked once per denoising step with (step, timestep, latents);
            # records that it ran and checks intermediate latent values.
            __UpperCAmelCase: List[str] = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                __UpperCAmelCase: Tuple = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                __UpperCAmelCase: Union[str, Any] = latents[0, -3:, -3:, -1]
                __UpperCAmelCase: Any = np.array(
                    [
                        0.1_8_6_8_1_8_6_9,
                        0.3_3_9_0_7_8_1_6,
                        0.5_3_6_1_2_7_6,
                        0.1_4_4_3_2_8_6_5,
                        -0.0_2_8_5_6_6_1_1,
                        -0.7_3_9_4_1_1_2_3,
                        0.2_3_3_9_7_9_8_7,
                        0.4_7_3_2_2_6_8_2,
                        -0.3_7_8_2_3_1_6_4,
                    ] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
            elif step == 2:
                __UpperCAmelCase: Tuple = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                __UpperCAmelCase: str = latents[0, -3:, -3:, -1]
                __UpperCAmelCase: List[Any] = np.array(
                    [
                        0.1_8_5_3_9_6_4_5,
                        0.3_3_9_8_7_2_4_8,
                        0.5_3_7_8_5_5_9,
                        0.1_4_4_3_7_1_4_2,
                        -0.0_2_4_5_5_2_6_1,
                        -0.7_3_3_8_3_1_7,
                        0.2_3_9_9_0_7_5_5,
                        0.4_7_3_5_6_2_7_2,
                        -0.3_7_8_6_5_0_5,
                    ] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2

        __UpperCAmelCase: Union[str, Any] = False
        __UpperCAmelCase: Optional[Any] = """stabilityai/stable-diffusion-2-base"""
        __UpperCAmelCase: Optional[Any] = DDIMScheduler.from_pretrained(snake_case_ , subfolder="""scheduler""" )
        __UpperCAmelCase: Optional[int] = StableDiffusionPanoramaPipeline.from_pretrained(snake_case_ , scheduler=snake_case_ , safety_checker=snake_case_ )
        __UpperCAmelCase: List[Any] = pipe.to(snake_case_ )
        pipe.set_progress_bar_config(disable=snake_case_ )
        pipe.enable_attention_slicing()
        __UpperCAmelCase: Optional[Any] = self.get_inputs()
        pipe(**snake_case_ , callback=snake_case_ , callback_steps=1 )
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def lowercase_ ( self ):
        """Sequential CPU offload keeps peak GPU memory under ~5.5 GB."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        __UpperCAmelCase: Optional[Any] = """stabilityai/stable-diffusion-2-base"""
        __UpperCAmelCase: Optional[int] = DDIMScheduler.from_pretrained(snake_case_ , subfolder="""scheduler""" )
        __UpperCAmelCase: Any = StableDiffusionPanoramaPipeline.from_pretrained(snake_case_ , scheduler=snake_case_ , safety_checker=snake_case_ )
        __UpperCAmelCase: Tuple = pipe.to(snake_case_ )
        pipe.set_progress_bar_config(disable=snake_case_ )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        __UpperCAmelCase: Optional[Any] = self.get_inputs()
        __UpperCAmelCase: Union[str, Any] = pipe(**snake_case_ )
        __UpperCAmelCase: Optional[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 5.2 GB is allocated
        assert mem_bytes < 5.5 * 10**9
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowerCamelCase (unittest.TestCase ):
    """PT <-> TF cross-framework round-trip tests for the Auto-model classes.

    Each test loads a checkpoint into a TF auto-class with ``from_pt`` and back
    into a PT auto-class with ``from_tf``, asserting the results are non-None
    instances.

    NOTE(review): machine-renamed file — every argument/local reference appears
    as ``lowercase__``, which is undefined at module scope; originally these
    were the loop variable ``model_name``, the loaded ``config``/``model``,
    boolean flags, and concrete classes (e.g. ``BertConfig``,
    ``TFBertModel``). The last two tests presumably used
    ``SMALL_MODEL_IDENTIFIER`` / ``DUMMY_UNKNOWN_IDENTIFIER``. Confirm against
    transformers' original ``test_modeling_tf_auto`` before trusting behavior.
    """

    @slow
    def UpperCAmelCase_ ( self ) -> List[Any]:
        """TFAutoModel / AutoModel round-trip for bert-base-uncased."""
        for model_name in ["bert-base-uncased"]:
            _snake_case : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Any = TFAutoModel.from_pretrained(lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : str = AutoModel.from_pretrained(lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> List[str]:
        """TFAutoModelForPreTraining / AutoModelForPreTraining round-trip."""
        for model_name in ["bert-base-uncased"]:
            _snake_case : Optional[Any] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Dict = TFAutoModelForPreTraining.from_pretrained(lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Tuple = AutoModelForPreTraining.from_pretrained(lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        """Causal-LM round-trip (GPT-2 checkpoints), incl. loading-info output."""
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case : Optional[int] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
            _snake_case , _snake_case : Tuple = TFAutoModelForCausalLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
            _snake_case , _snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        """LM-head round-trip (BERT checkpoints)."""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case : List[Any] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Tuple = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> Any:
        """Masked-LM round-trip (BERT checkpoints), incl. loading-info output."""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
            _snake_case , _snake_case : List[str] = TFAutoModelForMaskedLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : int = AutoModelForMaskedLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
            _snake_case , _snake_case : Optional[int] = AutoModelForMaskedLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> List[str]:
        """Seq2seq-LM round-trip (T5 checkpoints), incl. loading-info output."""
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
            _snake_case , _snake_case : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
            _snake_case , _snake_case : Dict = AutoModelForSeqaSeqLM.from_pretrained(
                lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> Dict:
        """Sequence-classification round-trip for bert-base-uncased."""
        for model_name in ["bert-base-uncased"]:
            _snake_case : Any = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Any = TFAutoModelForSequenceClassification.from_pretrained(lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Dict = AutoModelForSequenceClassification.from_pretrained(lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    @slow
    def UpperCAmelCase_ ( self ) -> Optional[int]:
        """Question-answering round-trip for bert-base-uncased."""
        for model_name in ["bert-base-uncased"]:
            _snake_case : str = AutoConfig.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_pt=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )
            _snake_case : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_tf=lowercase__ )
            self.assertIsNotNone(lowercase__ )
            self.assertIsInstance(lowercase__ , lowercase__ )

    def UpperCAmelCase_ ( self ) -> str:
        """LM-head load from a direct identifier; checks the tiny model's parameter count."""
        _snake_case : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
        _snake_case : Tuple = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )

    def UpperCAmelCase_ ( self ) -> str:
        """LM-head load where the model type is inferred from the identifier."""
        _snake_case : List[str] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
        _snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
        self.assertIsInstance(lowercase__ , lowercase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
| 47 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the timm-backbone subpackage: maps submodule name
# to the public names it provides. (The previous mangled version never bound
# `_import_structure` and never installed the lazy module, so importing this
# package raised NameError and did nothing.)
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch not installed: expose only the configuration.
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 47 | 1 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
# Module-level logger for this conversion script.
# NOTE(review): module-level annotations ARE evaluated, and `Union`/`Any` are
# never imported in this file — this line raises NameError as written; the
# annotation should be dropped or `typing` imported. Confirm against the
# original script.
a__ : Union[str, Any] = logging.get_logger(__name__)
logging.set_verbosity_info()
def _lowerCAmelCase ( prophetnet_checkpoint_path , pytorch_dump_folder_path ):
    """Convert an old-structure (XLM)ProphetNet checkpoint to the current layout.

    Loads the checkpoint with both the old and the new model classes, copies
    every parameter the new class reports as missing from its old-layout
    counterpart, then saves the converted model.

    :param prophetnet_checkpoint_path: path to the old-structure checkpoint.
    :param pytorch_dump_folder_path: output folder for the converted model.
    :raises ValueError: if any missing key cannot be mapped and initialized.
    """
    # XLM checkpoints are distinguished purely by their path name.
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    # Attention projections stored as one fused in_proj tensor in the old model.
    special_keys = ['key_proj', 'value_proj', 'query_proj']

    # New attribute name -> old attribute name ('' means "skip this level").
    mapping = {
        'self_attn': 'ngram_self_attn',
        'cross_attn': 'encoder_attn',
        'cross_attn_layer_norm': 'encoder_attn_layer_norm',
        'feed_forward_layer_norm': 'final_layer_norm',
        'feed_forward': '',
        'intermediate': 'fc1',
        'output': 'fc2',
        'key_proj': 'k_proj',
        'query_proj': 'q_proj',
        'value_proj': 'v_proj',
        'word_embeddings': 'embed_tokens',
        'embeddings_layer_norm': 'emb_layer_norm',
        'relative_pos_embeddings': 'relative_linear',
        'ngram_embeddings': 'ngram_input_embed',
        'position_embeddings': 'embed_positions',
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split('.' )

        # The LM head lives on the top-level model; everything else under the
        # prophetnet/model submodule.
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        # Walk the dotted attribute path in lock-step on both models.
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model , old_attribute ) and len(old_attribute ) > 0:
                old_attribute = attribute
            elif hasattr(old_model , attribute ):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(F'''{attribute} is initialized.''' )
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(F'''{attribute} is initialized''' )
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model , 'in_proj_weight' ):
                # Old attention stores q/k/v fused; split the thirds apart.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model , attribute )
                # NOTE(review): these two comparisons are no-op expressions (not
                # asserts) in the original script as well; kept as-is.
                param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                is_key_init = True
                break

            if attribute.isdigit():
                # Numeric path components index into ModuleLists.
                model = model[int(attribute )]
                old_model = old_model[int(old_attribute )]
            else:
                model = getattr(model , attribute )

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model , old_attribute ):
                        raise ValueError(F'''{old_model} does not have {old_attribute}''' )
                    old_model = getattr(old_model , old_attribute )

        if not is_key_init:
            raise ValueError(F'''{key} was not correctly initialized!''' )

    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    prophet.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI entry point: parse the two required paths and run the conversion.
    # (The previous version called the undefined name
    # `convert_prophetnet_checkpoint_to_pytorch` and evaluated broken
    # `Tuple`/`List[str]` annotations at module level.)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    _lowerCAmelCase(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 622 |
from math import sqrt
def is_prime(number):
    """Return True if `number` is prime, else False (trial division up to sqrt).

    Fix: the def was renamed to `_lowerCAmelCase` while every caller in this
    module uses `is_prime`, and the parameter was renamed while the body reads
    `number`.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def sieve_er(n):
    """Sieve of Eratosthenes: return all primes from 2 up to `n` (n > 2).

    Fix: restored name and locals (signature said `A__`/`_lowerCAmelCase`
    while the body reads `begin_list`).
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes: cross out multiples of every surviving value
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def get_prime_numbers(n):
    """Return every prime in [2, n] using `is_prime` (n > 2).

    Fix: restored the name the module's callers (e.g. `goldbach`) use.
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def prime_factorization(number):
    """Return the list of prime factors of `number` (0 and 1 map to [0]/[1]).

    Fixes: restored the name used by callers (`greatest_prime_factor`, `kg_v`,
    ...), and use floor division so the quotient stays an exact int instead of
    drifting through floats (`/=` in the original).
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient //= factor  # was `/=`: float division on an int quantity
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def greatest_prime_factor(number):
    """Return the largest prime factor of `number` (number >= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def smallest_prime_factor(number):
    """Return the smallest prime factor of `number` (number >= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def is_even(number):
    """Return True if `number` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"
    return number % 2 == 0
def is_odd(number):
    """Return True if `number` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"
    return number % 2 != 0
def goldbach(number):
    """Return two primes whose sum is the even `number` > 2 (Goldbach pair)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def gcd(number1, number2):
    """Greatest common divisor via the Euclidean algorithm (both args >= 0).

    Fix: restored the name `simplify_fraction` calls, and distinct parameter
    names (the obfuscated signature collapsed both to one name).
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
def kg_v(number1, number2):
    """Least common multiple of `number1` and `number2` via prime factorizations
    (both args >= 1). "kgV" is the German abbreviation for lcm."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                # shared factor: take it with the larger multiplicity
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def get_prime(n):
    """Return the n-th prime (0-indexed: get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between(p_number_1, p_number_2):
    """Return the primes strictly between the primes p_number_1 < p_number_2."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n):
    """Return all divisors of `n` (including 1 and n itself), n >= 1."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def is_perfect_number(number):
    """Return True if `number` equals the sum of its proper divisors (number > 1)."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator, denominator):
    """Return (numerator, denominator) reduced by their gcd; denominator != 0."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n):
    """Return n! for n >= 0 (0! == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def fib(n):
    """Return the n-th Fibonacci number of the 1, 1, 2, 3, 5, ... sequence
    (fib(0) == fib(1) == 1), iteratively."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans
| 622 | 1 |
"""Root-mean-square speed of gas molecules from kinetic theory."""
# Fix: the constant was bound to `UpperCAmelCase` while the formula below
# reads `UNIVERSAL_GAS_CONSTANT`; same for the function name used in __main__.
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return v_rms = sqrt(3RT/M).

    :param temperature: absolute temperature in kelvin (must be >= 0)
    :param molar_mass: molar mass (must be > 0); units determine the result's units
    :raises Exception: on a negative temperature or non-positive molar mass
    """
    if temperature < 0:
        raise Exception("""Temperature cannot be less than 0 K""")
    if molar_mass <= 0:
        raise Exception("""Molar mass cannot be less than or equal to 0 kg/mol""")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example: the call below uses these names, so the obfuscated
    # `UpperCAmelCase` bindings were restored to them.
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two equal-length vectors.

    Fix: restored the name `similarity_search` calls; parameter names are a
    reasonable reconstruction (originals were obfuscated).
    """
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def similarity_search(
    dataset: np.ndarray, value_array: np.ndarray
) -> list[list[list[float] | float]]:
    """For each vector in `value_array`, return [nearest dataset vector, distance].

    Fixes: the original body read undefined locals (`dist`, `temp_dist`,
    `vector`) because assignments were renamed to `lowerCAmelCase`; restored
    them. The function name is reconstructed — no in-file call site exists.
    :raises ValueError: on mismatched dimensions/shapes
    :raises TypeError: on mismatched dtypes or wrong shape
    """
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)
    answer = []
    for value in value_array:
        # start with the first dataset vector as the current nearest
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer
def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity: dot(a, b) / (|a| * |b|)."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
    import doctest

    # run the module's doctests when executed as a script
    # (trailing table-dump artifact tokens removed from the last line)
    doctest.testmod()
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Fix: all five regexes below were bound to the single name `__lowerCAmelCase`
# (each shadowing the last) while the functions in this file read `_re_indent`,
# `_re_direct_key`, `_re_indirect_key`, `_re_strip_line`, `_re_bracket_content`.
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Return the leading whitespace of `line` ("" for blank/whitespace-only lines).

    Fix: restored the name every other helper in this file calls.
    """
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks of consecutive lines at the given indent level.

    Everything before the first line starting with `start_prompt` (if given)
    becomes the first block; splitting stops at a line starting with
    `end_prompt` (the rest becomes the final block).
    Fix: restored the name `sort_imports` calls, and real local names.
    """
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))
    return blocks
def ignore_underscore(key):
    """Wrap a key function so sorting ignores case and underscores."""
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort `objects`: constants first, then classes, then functions — each group
    alphabetically, ignoring case and underscores.

    Fix: restored the name `sort_objects_in_import` calls, and the broken
    identity `noop` (its parameter had been renamed away from `x`).
    """
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    """Return `import_statement` with the names inside its brackets sorted.

    Handles the three layouts _import_structure entries come in: one name per
    line, a single bracketed line, or everything on one line.
    Fix: restored the name `sort_imports` calls, and real local names.
    """
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        return _re_bracket_content.sub(_replace, import_statement)
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` entries of an __init__.py.

    Returns True when the file would change and `check_only` is set;
    otherwise rewrites the file in place.
    Fix: restored the name callers use and real local names; the per-block
    result is written back into `main_blocks[block_idx]` so the final
    comparison/write sees the sorted content (the obfuscated line assigned it
    to a throwaway name).
    """
    with open(file, encoding="utf-8") as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )
    # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(sorted_block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every __init__.py under PATH_TO_TRANSFORMERS and
    raise if any would be rewritten while `check_only` is set."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                # NOTE(review): matches the original's `=` (keeps only the last
                # failing file); upstream may accumulate with `+=` — verify.
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    # Fix: the parser/args were bound to `__lowerCAmelCase` while the code
    # below calls the undefined names `parser` and `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
| 529 |
from __future__ import annotations
def extended_euclid(a, b):
    """Extended Euclidean algorithm: return (x, y) with a*x + b*y == gcd(a, b).

    Fix: restored the name the other helpers in this file call recursively.
    """
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(n_a, r_a, n_b, r_b):
    """Solve x ≡ r_a (mod n_a), x ≡ r_b (mod n_b) for coprime moduli.

    Parameter order/roles reconstructed from the doctest names — verify against
    the doctests; the obfuscation collapsed all four parameters to one name.
    """
    (x, y) = extended_euclid(n_a, n_b)
    m = n_a * n_b
    n = r_b * x * n_a + r_a * y * n_b
    # normalize into [0, m)
    return (n % m + m) % m
def invert_modulo(a, n):
    """Return the multiplicative inverse of `a` modulo `n` (a, n coprime)."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n_a, r_a, n_b, r_b):
    """CRT variant built on modular inverses instead of the extended gcd."""
    x, y = invert_modulo(n_a, n_b), invert_modulo(n_b, n_a)
    m = n_a * n_b
    n = r_b * x * n_a + r_a * y * n_b
    # normalize into [0, m)
    return (n % m + m) % m
if __name__ == "__main__":
    from doctest import testmod

    # run the doctests of each public helper by name
    testmod(name='chinese_remainder_theorem', verbose=True)
    testmod(name='chinese_remainder_theorem2', verbose=True)
    testmod(name='invert_modulo', verbose=True)
    testmod(name='extended_euclid', verbose=True)
| 529 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Fix: the dict was bound to `a_` while the `_LazyModule` call at the bottom
# reads `_import_structure`; the torch-only names must extend that same dict,
# and the lazy module must be installed into sys.modules (standard
# transformers __init__ boilerplate).
_import_structure = {
    'configuration_xlm_roberta_xl': [
        'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'XLMRobertaXLConfig',
        'XLMRobertaXLOnnxConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_xlm_roberta_xl'] = [
        'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XLMRobertaXLForCausalLM',
        'XLMRobertaXLForMaskedLM',
        'XLMRobertaXLForMultipleChoice',
        'XLMRobertaXLForQuestionAnswering',
        'XLMRobertaXLForSequenceClassification',
        'XLMRobertaXLForTokenClassification',
        'XLMRobertaXLModel',
        'XLMRobertaXLPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 148 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Load a saved state dict, cast every tensor to fp16, and save it.

    Fixes: `fire.Fire(convert)` below requires this name; the obfuscated
    signature repeated one parameter name three times (SyntaxError); and the
    halved tensor was bound to a throwaway name instead of being written back
    into the state dict.

    :param src_path: path to a torch-saved state dict (e.g. pytorch_model.bin)
    :param map_location: device for torch.load
    :param save_path: output path; overwrites `src_path` when None
    :raises TypeError: if any value in the state dict is not a tensor
    """
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''')
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
| 148 | 1 |
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Fix: the dict was bound to `__snake_case` while `_LazyModule` at the bottom
# reads `_import_structure`; torch-only names extend the same dict; the lazy
# module is installed into sys.modules (standard transformers boilerplate).
_import_structure = {
    """configuration_trajectory_transformer""": [
        """TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """TrajectoryTransformerConfig""",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_trajectory_transformer"""] = [
        """TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TrajectoryTransformerModel""",
        """TrajectoryTransformerPreTrainedModel""",
        """load_tf_weights_in_trajectory_transformer""",
    ]

if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 658 |
def _A(hex_num: str) -> int:
    """Convert a hex string to an int whose decimal digits spell its binary form
    (e.g. "5" -> 101), preserving sign.

    Fix: "0" (and "-0") used to fall through the while loop with an empty
    `bin_str` and crash in `int("")`; zero now returns 0 directly.

    :raises ValueError: on empty input or a non-hexadecimal string
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError('''No value was passed to the function''')
    is_negative = hex_num[0] == '''-'''
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError('''Invalid value was passed to the function''')
    if int_num == 0:
        # zero has no set bits; previously produced int("") -> ValueError
        return 0
    bin_str = ''''''
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(('''-''' + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 658 | 1 |
"""simple docstring"""
import math
import qiskit
def snake_case_ ( A_ : int = 1, A_ : int = 1, A_ : int = 1 ):
'''simple docstring'''
if (
isinstance(A_, A_ )
or isinstance(A_, A_ )
or isinstance(A_, A_ )
):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('''inputs must be positive.''' )
if (
(math.floor(A_ ) != input_a)
or (math.floor(A_ ) != input_a)
or (math.floor(A_ ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('''inputs must be less or equal to 2.''' )
# build registers
_lowerCamelCase : Dict = qiskit.QuantumRegister(4, '''qr''' )
_lowerCamelCase : Dict = qiskit.ClassicalRegister(2, '''cr''' )
# list the entries
_lowerCamelCase : List[str] = [input_a, input_a, carry_in]
_lowerCamelCase : Dict = qiskit.QuantumCircuit(A_, A_ )
for i in range(0, 3 ):
if entry[i] == 2:
quantum_circuit.h(A_ ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(A_ ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(A_ ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0, 1, 3 ) # ccx = toffoli gate
quantum_circuit.cx(0, 1 )
quantum_circuit.ccx(1, 2, 3 )
quantum_circuit.cx(1, 2 )
quantum_circuit.cx(0, 1 )
quantum_circuit.measure([2, 3], A_ ) # measure the last two qbits
_lowerCamelCase : Optional[Any] = qiskit.Aer.get_backend('''aer_simulator''' )
_lowerCamelCase : Optional[Any] = qiskit.execute(A_, A_, shots=10_00 )
return job.result().get_counts(A_ )
if __name__ == "__main__":
print(F"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 598 |
"""simple docstring"""
lowerCAmelCase__ = {'''a''': ['''c''', '''b'''], '''b''': ['''d''', '''e'''], '''c''': [], '''d''': [], '''e''': []}
lowerCAmelCase__ = ['''a''', '''b''', '''c''', '''d''', '''e''']
def topological_sort(start, visited, sort):
    """DFS from `start`, appending each node after all of its successors
    (post-order), then restarting from any still-unvisited vertex.

    Fix: the def was named `snake_case_` while the recursive calls (and the
    __main__ block) use `topological_sort`; locals restored from the body.
    """
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    # Fix: the result was bound to `lowerCAmelCase__` while `print` reads `sort`.
    sort = topological_sort('''a''', [], [])
    print(sort)
| 598 | 1 |
"""Project Euler 23: sum of all positive integers that cannot be written as
the sum of two abundant numbers."""
def solution(limit: int = 28123) -> int:
    """Return the sum of n in [1, limit] not expressible as a sum of two
    abundant numbers.

    Fixes: `print(solution())` below requires this name, and the set insert
    used the undefined name `lowerCAmelCase__` — it must add `n`.
    """
    # sum_divs[n] accumulates the proper-divisor sum of n
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res


if __name__ == "__main__":
    print(solution())
| 688 |
"""Fixture module used by test_patching.py to exercise patch_submodule()."""
# This is the module that test_patching.py uses to test patch_submodule()
import os  # noqa: this is just for tests
import os as renamed_os  # noqa: this is just for tests
from os import path  # noqa: this is just for tests
from os import path as renamed_path  # noqa: this is just for tests
from os.path import join  # noqa: this is just for tests
from os.path import join as renamed_join  # noqa: this is just for tests
# NOTE(review): the alias name below looks obfuscated; upstream keeps a plain
# builtin reference in module scope for the patching tests — verify the name.
__SCREAMING_SNAKE_CASE = open # noqa: we just need to have a builtin inside this module to test it properly
| 688 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both assignments below bind the same name, so the logger is
# immediately clobbered by the archive map; upstream names these `logger` and
# `IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP` — verify before renaming.
__snake_case =logging.get_logger(__name__)
# checkpoint name -> config URL for the pretrained I-BERT models
__snake_case ={
    """kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""",
    """kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""",
    """kssteven/ibert-roberta-large-mnli""": (
        """https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"""
    ),
}
class UpperCAmelCase_ ( __lowercase ):
    """Configuration for I-BERT (integer-only BERT) models.

    Stores the transformer hyper-parameters plus the I-BERT specific
    quantization switches. NOTE(review): the class/base/attribute names look
    obfuscated; upstream this is `IBertConfig(PretrainedConfig)` with
    `model_type = "ibert"` — verify before renaming.
    """
    # model-type tag used by AutoConfig dispatch
    lowerCamelCase : Dict = '''ibert'''
    def __init__( self : Union[str, Any] , UpperCAmelCase__ : str=3_0_5_2_2 , UpperCAmelCase__ : Dict=7_6_8 , UpperCAmelCase__ : Tuple=1_2 , UpperCAmelCase__ : Tuple=1_2 , UpperCAmelCase__ : Optional[Any]=3_0_7_2 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Optional[int]=5_1_2 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Optional[int]=0.02 , UpperCAmelCase__ : Dict=1E-12 , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : Optional[int]=0 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Optional[int]="absolute" , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : Optional[Any]="none" , **UpperCAmelCase__ : Union[str, Any] , ) -> Any:
        """Store every hyper-parameter on the instance; extra kwargs plus the
        pad/bos/eos token ids are forwarded to the base config."""
        super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
        lowerCAmelCase = vocab_size
        lowerCAmelCase = hidden_size
        lowerCAmelCase = num_hidden_layers
        lowerCAmelCase = num_attention_heads
        lowerCAmelCase = hidden_act
        lowerCAmelCase = intermediate_size
        lowerCAmelCase = hidden_dropout_prob
        lowerCAmelCase = attention_probs_dropout_prob
        lowerCAmelCase = max_position_embeddings
        lowerCAmelCase = type_vocab_size
        lowerCAmelCase = initializer_range
        lowerCAmelCase = layer_norm_eps
        lowerCAmelCase = position_embedding_type
        # I-BERT specific: run in quantized (integer-only) mode, and optionally
        # force-dequantize specific ops
        lowerCAmelCase = quant_mode
        lowerCAmelCase = force_dequant
class UpperCAmelCase_ ( __lowercase ):
    """ONNX export configuration for I-BERT: declares the dynamic input axes.

    NOTE(review): shadows the config class above because both were obfuscated
    to the same name; upstream this is `IBertOnnxConfig(OnnxConfig)` — verify.
    """
    @property
    def __UpperCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
        """Return the ONNX input spec; multiple-choice inputs carry an extra
        `choice` axis between batch and sequence."""
        if self.task == "multiple-choice":
            lowerCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            lowerCAmelCase = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 513 |
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Highest-Response-Ratio-Next scheduling: return the turn-around time of
    each process, in arrival order.

    Fixes: the __main__ block calls `calculate_turn_around_time`, so the
    obfuscated def name was restored; the signature repeated one parameter
    name four times (SyntaxError); locals restored from the body.
    Note: `arrival_time` is sorted in place, matching the original.
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        # advance to the first unfinished process (arrival order)
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        temp = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        response_ratio = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                # response ratio = (waiting time + burst time) / burst time
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time
def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Return per-process waiting time = turn-around time - burst time.

    Fixes: restored the name the __main__ block calls and distinct parameter
    names (the obfuscated signature collapsed all four into one).
    """
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
__snake_case =5
__snake_case =["""A""", """B""", """C""", """D""", """E"""]
__snake_case =[1, 2, 3, 4, 5]
__snake_case =[1, 2, 3, 4, 5]
__snake_case =calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
__snake_case =calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
for i in range(0, no_of_process):
print(
F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(F'''average waiting time : {mean(waiting_time):.5f}''')
print(F'''average turn around time : {mean(turn_around_time):.5f}''')
| 513 | 1 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class lowercase ( unittest.TestCase ):
    """Configuration holder used by the BeitImageProcessor tests.

    Stores every constructor knob of the image processor under test together
    with the fake-input dimensions (batch size, channels, resolution bounds)
    used when preparing test images.

    Note: the original signature repeated the parameter name ``_lowercase``
    (a SyntaxError) and dropped the ``self.`` targets of the assignments;
    both are restored here from the attribute names read by the getter below.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_reduce_labels=False,
    ):
        """Store the processor kwargs, falling back to the test defaults."""
        # Default geometry matches the values asserted by the tests
        # (size 20x20, centre crop 18x18).
        self.size = size if size is not None else {"height": 20, "width": 20}
        self.crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_center_crop = do_center_crop
        self.do_normalize = do_normalize
        # None-sentinel instead of mutable list defaults; behaviour unchanged.
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.do_reduce_labels = do_reduce_labels

    def lowercase__(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def a():
    """Load the ADE20k test fixtures and return one (image, segmentation map) pair.

    Uses ``datasets.load_dataset`` (network access) and PIL.  The garbled
    original assigned every value to a placeholder name and then returned
    undefined variables; the bindings are restored here in assignment order.
    """
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(ds[0]["file"])
    seg_map = Image.open(ds[1]["file"])
    return image, seg_map
def a():
    """Load the ADE20k test fixtures and return two images with their maps.

    Returns ``([image_one, image_two], [map_one, map_two])``.  Uses
    ``datasets.load_dataset`` (network access) and PIL.  The garbled original
    assigned every value to a placeholder name and returned undefined
    variables; bindings restored in assignment order (image, map, image, map).
    """
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image_one = Image.open(ds[0]["file"])
    map_one = Image.open(ds[1]["file"])
    image_two = Image.open(ds[2]["file"])
    map_two = Image.open(ds[3]["file"])
    return [image_one, image_two], [map_one, map_two]
@require_torch
@require_vision
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
    """Unit tests for ``BeitImageProcessor``: config round-trips and PIL /
    numpy / torch inputs, plus semantic-segmentation label handling.

    NOTE(review): an automated rewrite collapsed every local assignment target
    to ``SCREAMING_SNAKE_CASE__``, so the names read afterwards
    (``image_processor``, ``image_inputs``, ``encoded_images``, ``encoding``)
    are never actually bound — confirm against the upstream transformers test
    suite before executing.
    """

    # Class under test; None when the vision extra (PIL) is unavailable.
    lowerCamelCase : Tuple = BeitImageProcessor if is_vision_available() else None

    def lowercase__ ( self : List[Any] ):
        # setUp: build the shared tester holding processor kwargs and fake-input dims.
        SCREAMING_SNAKE_CASE__ : Tuple = BeitImageProcessingTester(self )

    @property
    def lowercase__ ( self : Optional[Any] ):
        # kwargs dict used to instantiate the processor in each test.
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowercase__ ( self : Tuple ):
        # The processor exposes every expected config attribute.
        SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_lowercase , '''do_resize''' ) )
        self.assertTrue(hasattr(_lowercase , '''size''' ) )
        self.assertTrue(hasattr(_lowercase , '''do_center_crop''' ) )
        self.assertTrue(hasattr(_lowercase , '''center_crop''' ) )
        self.assertTrue(hasattr(_lowercase , '''do_normalize''' ) )
        self.assertTrue(hasattr(_lowercase , '''image_mean''' ) )
        self.assertTrue(hasattr(_lowercase , '''image_std''' ) )

    def lowercase__ ( self : Optional[Any] ):
        # from_dict honours the defaults and keyword overrides
        # (size / crop_size / reduce_labels).
        SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        self.assertEqual(image_processor.do_reduce_labels , _lowercase )
        SCREAMING_SNAKE_CASE__ : Tuple = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_lowercase )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
        self.assertEqual(image_processor.do_reduce_labels , _lowercase )

    def lowercase__ ( self : str ):
        # Intentional no-op: overrides an inherited test that does not apply here.
        pass

    def lowercase__ ( self : int ):
        # PIL inputs: single image and batch come out with (N, C, crop_h, crop_w).
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        SCREAMING_SNAKE_CASE__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
        for image in image_inputs:
            self.assertIsInstance(_lowercase , Image.Image )
        # Test not batched input
        SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def lowercase__ ( self : Dict ):
        # numpy inputs: same shape expectations as the PIL case.
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        SCREAMING_SNAKE_CASE__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
        for image in image_inputs:
            self.assertIsInstance(_lowercase , np.ndarray )
        # Test not batched input
        SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE__ : int = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def lowercase__ ( self : Optional[Any] ):
        # torch-tensor inputs: same shape expectations again.
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
        for image in image_inputs:
            self.assertIsInstance(_lowercase , torch.Tensor )
        # Test not batched input
        SCREAMING_SNAKE_CASE__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE__ : Any = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def lowercase__ ( self : Optional[int] ):
        # Segmentation maps: check pixel_values and labels shapes/dtypes for
        # tensor and PIL inputs, single and batched.
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
        SCREAMING_SNAKE_CASE__ : Any = []
        for image in image_inputs:
            self.assertIsInstance(_lowercase , torch.Tensor )
            maps.append(torch.zeros(image.shape[-2:] ).long() )
        # Test not batched input
        SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
        self.assertEqual(
            encoding['''pixel_values'''].shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(
            encoding['''labels'''].shape , (
                1,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(encoding['''labels'''].dtype , torch.long )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
        # Test batched
        SCREAMING_SNAKE_CASE__ : str = image_processing(_lowercase , _lowercase , return_tensors='''pt''' )
        self.assertEqual(
            encoding['''pixel_values'''].shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(
            encoding['''labels'''].shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(encoding['''labels'''].dtype , torch.long )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
        # Test not batched input (PIL images)
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = prepare_semantic_single_inputs()
        SCREAMING_SNAKE_CASE__ : int = image_processing(_lowercase , _lowercase , return_tensors='''pt''' )
        self.assertEqual(
            encoding['''pixel_values'''].shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(
            encoding['''labels'''].shape , (
                1,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(encoding['''labels'''].dtype , torch.long )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
        # Test batched input (PIL images)
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = prepare_semantic_batch_inputs()
        SCREAMING_SNAKE_CASE__ : Any = image_processing(_lowercase , _lowercase , return_tensors='''pt''' )
        self.assertEqual(
            encoding['''pixel_values'''].shape , (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(
            encoding['''labels'''].shape , (
                2,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(encoding['''labels'''].dtype , torch.long )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )

    def lowercase__ ( self : int ):
        # reduce_labels=True remaps label 0 (background) to 255.
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = prepare_semantic_single_inputs()
        SCREAMING_SNAKE_CASE__ : List[str] = image_processing(_lowercase , _lowercase , return_tensors='''pt''' )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 1_50 )
        SCREAMING_SNAKE_CASE__ : List[Any] = True
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processing(_lowercase , _lowercase , return_tensors='''pt''' )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
| 35 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCamelCase_ ( lowerCamelCase , unittest.TestCase ):
    """Unit tests for the (Mobile)BERT tokenizers: WordPiece splitting,
    basic-tokenizer casing/accent options, character classifiers, offsets and
    Chinese-character handling.

    NOTE(review): an automated rewrite collapsed every local assignment target
    to ``__magic_name__``, so many names read afterwards (``vocab_tokens``,
    ``tokenizer``, ``tokens`` ...) are never actually bound — confirm against
    the upstream transformers test suite before executing.
    """

    # Tokenizer classes under test plus mixin configuration flags.
    a__ = MobileBertTokenizer
    a__ = MobileBertTokenizerFast
    a__ = True
    a__ = True
    a__ = filter_non_english
    a__ = '''google/mobilebert-uncased'''

    def A ( self ):
        """Write a tiny WordPiece vocab file and pin the pretrained model path."""
        super().setUp()
        __magic_name__ :Tuple = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        __magic_name__ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        __magic_name__ :List[str] = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def A ( self , __lowerCAmelCase ):
        """Return an (input text, expected output text) pair for round-trip tests."""
        __magic_name__ :Union[str, Any] = '''UNwant\u00E9d,running'''
        __magic_name__ :int = '''unwanted, running'''
        return input_text, output_text

    def A ( self ):
        """Slow tokenizer splits text into WordPieces and maps them to vocab ids."""
        __magic_name__ :Optional[int] = self.tokenizer_class(self.vocab_file )
        __magic_name__ :List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(__lowerCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [9, 6, 7, 1_2, 1_0, 1_1] )

    def A ( self ):
        """Python and Rust tokenizers agree, with and without lower casing."""
        if not self.test_rust_tokenizer:
            return
        __magic_name__ :int = self.get_tokenizer()
        __magic_name__ :Tuple = self.get_rust_tokenizer()
        __magic_name__ :List[str] = '''UNwant\u00E9d,running'''
        __magic_name__ :Optional[Any] = tokenizer.tokenize(__lowerCAmelCase )
        __magic_name__ :List[Any] = rust_tokenizer.tokenize(__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
        __magic_name__ :int = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
        __magic_name__ :str = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
        __magic_name__ :List[Any] = self.get_rust_tokenizer()
        __magic_name__ :Any = tokenizer.encode(__lowerCAmelCase )
        __magic_name__ :Any = rust_tokenizer.encode(__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
        # With lower casing
        __magic_name__ :Any = self.get_tokenizer(do_lower_case=__lowerCAmelCase )
        __magic_name__ :List[Any] = self.get_rust_tokenizer(do_lower_case=__lowerCAmelCase )
        __magic_name__ :Dict = '''UNwant\u00E9d,running'''
        __magic_name__ :Tuple = tokenizer.tokenize(__lowerCAmelCase )
        __magic_name__ :Union[str, Any] = rust_tokenizer.tokenize(__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
        __magic_name__ :Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
        __magic_name__ :Dict = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
        __magic_name__ :Tuple = self.get_rust_tokenizer()
        __magic_name__ :Dict = tokenizer.encode(__lowerCAmelCase )
        __magic_name__ :List[Any] = rust_tokenizer.encode(__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )

    def A ( self ):
        """BasicTokenizer isolates CJK characters as single tokens."""
        __magic_name__ :Optional[int] = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )

    def A ( self ):
        """Lower-casing basic tokenizer folds case (and strips accents by default)."""
        __magic_name__ :List[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU?  ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def A ( self ):
        """Lower casing with accents preserved (strip_accents disabled)."""
        __magic_name__ :Union[str, Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )

    def A ( self ):
        """Lower casing with accents stripped explicitly."""
        __magic_name__ :Dict = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def A ( self ):
        """Default lower casing also strips accents."""
        __magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def A ( self ):
        """Case is preserved when do_lower_case is disabled."""
        __magic_name__ :List[str] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU?  ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def A ( self ):
        """No lower casing, accents preserved."""
        __magic_name__ :int = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def A ( self ):
        """No lower casing, accents stripped."""
        __magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def A ( self ):
        """Tokens listed in never_split are kept intact."""
        __magic_name__ :Optional[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , never_split=['''[UNK]'''] )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )

    def A ( self ):
        """WordpieceTokenizer splits into sub-words and falls back to [UNK]."""
        __magic_name__ :int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        __magic_name__ :Union[str, Any] = {}
        for i, token in enumerate(__lowerCAmelCase ):
            __magic_name__ :Tuple = i
        __magic_name__ :List[Any] = WordpieceTokenizer(vocab=__lowerCAmelCase , unk_token='''[UNK]''' )
        self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
        self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )

    def A ( self ):
        """_is_whitespace classifies whitespace code points (incl. NBSP)."""
        self.assertTrue(_is_whitespace(''' ''' ) )
        self.assertTrue(_is_whitespace('''\t''' ) )
        self.assertTrue(_is_whitespace('''\r''' ) )
        self.assertTrue(_is_whitespace('''\n''' ) )
        self.assertTrue(_is_whitespace('''\u00A0''' ) )
        self.assertFalse(_is_whitespace('''A''' ) )
        self.assertFalse(_is_whitespace('''-''' ) )

    def A ( self ):
        """_is_control classifies control characters."""
        self.assertTrue(_is_control('''\u0005''' ) )
        self.assertFalse(_is_control('''A''' ) )
        self.assertFalse(_is_control(''' ''' ) )
        self.assertFalse(_is_control('''\t''' ) )
        self.assertFalse(_is_control('''\r''' ) )

    def A ( self ):
        """_is_punctuation classifies punctuation characters."""
        self.assertTrue(_is_punctuation('''-''' ) )
        self.assertTrue(_is_punctuation('''$''' ) )
        self.assertTrue(_is_punctuation('''`''' ) )
        self.assertTrue(_is_punctuation('''.''' ) )
        self.assertFalse(_is_punctuation('''A''' ) )
        self.assertFalse(_is_punctuation(''' ''' ) )

    def A ( self ):
        """Soft-hyphen-only strings tokenize to [] (tokenizers issue #340)."""
        __magic_name__ :Any = self.get_tokenizer()
        __magic_name__ :Any = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )

    @slow
    def A ( self ):
        """build_inputs_with_special_tokens adds [CLS]/[SEP] around sequences."""
        __magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
        __magic_name__ :Optional[int] = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCAmelCase )
        __magic_name__ :List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCAmelCase )
        __magic_name__ :Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
        __magic_name__ :List[Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )
        assert encoded_sentence == [1_0_1] + text + [1_0_2]
        assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]

    def A ( self ):
        """offset_mapping lines up with tokens, with and without lower casing."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                __magic_name__ :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
                __magic_name__ :Optional[int] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                __magic_name__ :Optional[Any] = tokenizer_r.encode_plus(
                    __lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , )
                __magic_name__ :Any = tokenizer_r.do_lower_case if hasattr(__lowerCAmelCase , '''do_lower_case''' ) else False
                __magic_name__ :Optional[int] = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), '''A'''),
                        ((1, 2), ''','''),
                        ((3, 5), '''na'''),
                        ((5, 6), '''##ï'''),
                        ((6, 8), '''##ve'''),
                        ((9, 1_5), tokenizer_r.mask_token),
                        ((1_6, 2_1), '''Allen'''),
                        ((2_1, 2_3), '''##NL'''),
                        ((2_3, 2_4), '''##P'''),
                        ((2_5, 3_3), '''sentence'''),
                        ((3_3, 3_4), '''.'''),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), '''a'''),
                        ((1, 2), ''','''),
                        ((3, 8), '''naive'''),
                        ((9, 1_5), tokenizer_r.mask_token),
                        ((1_6, 2_1), '''allen'''),
                        ((2_1, 2_3), '''##nl'''),
                        ((2_3, 2_4), '''##p'''),
                        ((2_5, 3_3), '''sentence'''),
                        ((3_3, 3_4), '''.'''),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )

    def A ( self ):
        """Chinese characters get no '##' prefix unless tokenize_chinese_chars is off."""
        __magic_name__ :Dict = ['''的''', '''人''', '''有''']
        __magic_name__ :Any = ''''''.join(__lowerCAmelCase )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                __magic_name__ :Optional[Any] = True
                __magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
                __magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
                __magic_name__ :Dict = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                __magic_name__ :List[str] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                __magic_name__ :Dict = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase )
                __magic_name__ :Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
                self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
                __magic_name__ :List[str] = False
                __magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
                __magic_name__ :List[str] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
                __magic_name__ :Optional[Any] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                __magic_name__ :Union[str, Any] = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                __magic_name__ :List[str] = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase )
                __magic_name__ :Optional[int] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase )
                # it is expected that only the first Chinese character is not preceded by "##".
                __magic_name__ :Dict = [
                    F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__lowerCAmelCase )
                ]
                self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
                self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
| 0 | 0 |
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
UpperCAmelCase = logging.getLogger(__name__)
def _snake_case ( ) -> str:
    """Tokenize a raw text dump line-by-line and pickle the token-id arrays.

    CLI flags: ``--file_path`` (input text), ``--tokenizer_type``
    (bert/roberta/gpt2), ``--tokenizer_name``, ``--dump_file`` (output prefix).

    NOTE(review): an automated rewrite collapsed every assignment target to
    ``lowerCAmelCase``, so the names referenced below (``args``, ``parser``,
    ``tokenizer``, ``bos``, ``sep``, ``data``, ``rslt`` ...) are never
    actually bound — confirm against the upstream distillation script.
    """
    lowerCAmelCase = argparse.ArgumentParser(
        description="""Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).""" )
    parser.add_argument("""--file_path""" , type=_SCREAMING_SNAKE_CASE , default="""data/dump.txt""" , help="""The path to the data.""" )
    parser.add_argument("""--tokenizer_type""" , type=_SCREAMING_SNAKE_CASE , default="""bert""" , choices=["""bert""", """roberta""", """gpt2"""] )
    parser.add_argument("""--tokenizer_name""" , type=_SCREAMING_SNAKE_CASE , default="""bert-base-uncased""" , help="""The tokenizer to use.""" )
    parser.add_argument("""--dump_file""" , type=_SCREAMING_SNAKE_CASE , default="""data/dump""" , help="""The dump file prefix.""" )
    lowerCAmelCase = parser.parse_args()
    logger.info(f'Loading Tokenizer ({args.tokenizer_name})' )
    # Pick the tokenizer class plus its sequence-boundary special tokens.
    if args.tokenizer_type == "bert":
        lowerCAmelCase = BertTokenizer.from_pretrained(args.tokenizer_name )
        lowerCAmelCase = tokenizer.special_tokens_map["""cls_token"""]  # `[CLS]`
        lowerCAmelCase = tokenizer.special_tokens_map["""sep_token"""]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        lowerCAmelCase = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        lowerCAmelCase = tokenizer.special_tokens_map["""cls_token"""]  # `<s>`
        lowerCAmelCase = tokenizer.special_tokens_map["""sep_token"""]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        lowerCAmelCase = GPTaTokenizer.from_pretrained(args.tokenizer_name )
        lowerCAmelCase = tokenizer.special_tokens_map["""bos_token"""]  # `<|endoftext|>`
        lowerCAmelCase = tokenizer.special_tokens_map["""eos_token"""]  # `<|endoftext|>`
    logger.info(f'Loading text from {args.file_path}' )
    with open(args.file_path , """r""" , encoding="""utf8""" ) as fp:
        lowerCAmelCase = fp.readlines()
    logger.info("""Start encoding""" )
    logger.info(f'{len(_SCREAMING_SNAKE_CASE )} examples to process.' )
    lowerCAmelCase = []
    lowerCAmelCase = 0
    # Progress-log interval (lines).
    lowerCAmelCase = 10_000
    lowerCAmelCase = time.time()
    for text in data:
        # Wrap each line in the model's boundary tokens before encoding.
        lowerCAmelCase = f'{bos} {text.strip()} {sep}'
        lowerCAmelCase = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
        rslt.append(_SCREAMING_SNAKE_CASE )
        iter += 1
        if iter % interval == 0:
            lowerCAmelCase = time.time()
            logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
            lowerCAmelCase = time.time()
    logger.info("""Finished binarization""" )
    logger.info(f'{len(_SCREAMING_SNAKE_CASE )} examples processed.' )
    lowerCAmelCase = f'{args.dump_file}.{args.tokenizer_name}.pickle'
    lowerCAmelCase = tokenizer.vocab_size
    # Use 16-bit ids when the vocab fits, else 32-bit, to shrink the dump.
    if vocab_size < (1 << 16):
        lowerCAmelCase = [np.uintaa(_SCREAMING_SNAKE_CASE ) for d in rslt]
    else:
        lowerCAmelCase = [np.intaa(_SCREAMING_SNAKE_CASE ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(f'Dump to {dp_file}' )
    with open(_SCREAMING_SNAKE_CASE , """wb""" ) as handle:
        pickle.dump(rslt_ , _SCREAMING_SNAKE_CASE , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
    # Entry point.  The preprocessing routine above was renamed to
    # ``_snake_case`` in this file; the original guard called the undefined
    # ``main()`` (NameError) and carried fused dataset-separator junk.
    _snake_case()
'''simple docstring'''
def _snake_case ( _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ) -> float:
"""simple docstring"""
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
    # Run the doctests embedded in this module.  The original last line had
    # dataset-separator junk (`| 344 | 0 |`) fused onto it, which made the
    # statement a SyntaxError; removed here.
    import doctest

    doctest.testmod()
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
UpperCamelCase : Union[str, Any] = sys.version_info >= (3, 10)
def UpperCamelCase_ ( __a=None , __a=None ) -> Union[str, Any]:
return field(default_factory=lambda: default , metadata=__a )
@dataclass
class A__ :
    """Fixture dataclass with four required fields for HfArgumentParser tests.

    NOTE(review): an automated rewrite flattened every field declaration to
    ``_lowercase = 4_2``; upstream this class declared four distinct typed
    fields — confirm before relying on it.
    """
    _lowercase = 4_2  # garbled field declaration (originally a typed field)
    _lowercase = 4_2
    _lowercase = 4_2
    _lowercase = 4_2
@dataclass
class A__ :
    """Fixture dataclass: one required field plus a defaulted field carrying
    help-message metadata (field names garbled by an automated rewrite)."""
    _lowercase = 4_2  # garbled: originally a required typed field
    _lowercase = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class A__ :
    """Fixture dataclass of boolean flags: a False default, a True default and
    an optional (None) one (field names garbled by an automated rewrite)."""
    _lowercase = False
    _lowercase = True
    _lowercase = None
class A__ ( _lowercase ):
    """Fixture string enum with members 'titi' and 'toto'.

    NOTE(review): the base-class name and member names were garbled to
    ``_lowercase``; upstream this derives from ``Enum``.
    """
    _lowercase = 'titi'
    _lowercase = 'toto'
class A__ ( _lowercase ):
    """Fixture mixed-type enum: two string members plus an int member (42),
    used to exercise choices of heterogeneous types.

    NOTE(review): base-class and member names were garbled to ``_lowercase``.
    """
    _lowercase = 'titi'
    _lowercase = 'toto'
    _lowercase = 4_2
@dataclass
class A__ :
    """Fixture dataclass whose post-init step coerces the string field into a
    ``BasicEnum`` member (names garbled; the method below was originally
    ``__post_init__`` reading ``self.foo``)."""
    _lowercase = 'toto'
    def _UpperCamelCase( self : Optional[int] ):
        # Coerce the raw string into the enum; the assignment target was garbled.
        a__ : List[Any] = BasicEnum(self.foo )
@dataclass
class A__ :
    """Fixture dataclass whose post-init step coerces the string field into a
    ``MixedTypeEnum`` member (names garbled; the method below was originally
    ``__post_init__`` reading ``self.foo``)."""
    _lowercase = 'toto'
    def _UpperCamelCase( self : str ):
        # Coerce the raw string into the mixed-type enum.
        a__ : Tuple = MixedTypeEnum(self.foo )
@dataclass
class A__ :
    """Fixture dataclass of optional fields: plain None defaults, a defaulted
    field with help metadata, and two list fields (names garbled by an
    automated rewrite)."""
    _lowercase = None
    _lowercase = field(default=_lowercase , metadata={'help': 'help message'} )
    _lowercase = None
    _lowercase = list_field(default=[] )
    _lowercase = list_field(default=[] )
@dataclass
class A__ :
    """Fixture dataclass of list fields with empty, int, str and float
    defaults (names garbled by an automated rewrite)."""
    _lowercase = list_field(default=[] )
    _lowercase = list_field(default=[1, 2, 3] )
    _lowercase = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
    _lowercase = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class A__ :
    """Fixture dataclass of required fields, one of which is coerced into a
    ``BasicEnum`` in the post-init step (names garbled; the method below was
    originally ``__post_init__`` reading ``self.required_enum``)."""
    _lowercase = field()
    _lowercase = field()
    _lowercase = field()
    def _UpperCamelCase( self : List[str] ):
        # Coerce the raw required value into the enum.
        a__ : Dict = BasicEnum(self.required_enum )
@dataclass
class A__ :
    """Fixture dataclass mixing a constant, a required field, an optional, a
    defaulted field with help metadata and a list field (names garbled by an
    automated rewrite)."""
    _lowercase = 4_2
    _lowercase = field()
    _lowercase = None
    _lowercase = field(default='toto' , metadata={'help': 'help message'} )
    _lowercase = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
    # PEP 604 (`X | None`) fixtures — only definable on Python >= 3.10.
    @dataclass
    class A__ :
        """3.10+ variant of the boolean-flags fixture dataclass (field names
        garbled by an automated rewrite)."""
        _lowercase = False
        _lowercase = True
        _lowercase = None
    @dataclass
    class A__ :
        """3.10+ variant of the optional-fields fixture dataclass (field names
        garbled by an automated rewrite)."""
        _lowercase = None
        _lowercase = field(default=_lowercase , metadata={'help': 'help message'} )
        _lowercase = None
        _lowercase = list_field(default=[] )
        _lowercase = list_field(default=[] )
class A__ ( unittest.TestCase ):
    """Tests for ``HfArgumentParser``'s dataclass -> argparse mapping.

    NOTE(review): an automated rename has damaged this class — results are
    bound to the throwaway name ``a__`` while later statements read the
    intended names (``parser``, ``expected``, ``args``, ``enum_ex``, ...),
    and most call arguments were replaced by the undefined ``_snake_case``.
    Comments below describe the apparent intent only; restore from upstream
    before running.
    """

    def _UpperCamelCase( self : Dict , lowerCamelCase__ : argparse.ArgumentParser , lowerCamelCase__ : argparse.ArgumentParser ):
        # Helper (upstream ``argparsersEqual``): assert two parsers define the
        # same actions, comparing mixed-type choice converters by value.
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            a__ : Union[str, Any] = {k: v for k, v in vars(_snake_case ).items() if k != "container"}
            a__ : Tuple = {k: v for k, v in vars(_snake_case ).items() if k != "container"}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices" , _snake_case ) and yy.get("choices" , _snake_case ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](_snake_case ) , yy["type"](_snake_case ) )
                del xx["type"], yy["type"]
            self.assertEqual(_snake_case , _snake_case )

    def _UpperCamelCase( self : str ):
        # Basic int/float/str/bool fields map to required arguments.
        a__ : Tuple = HfArgumentParser(_snake_case )
        a__ : Tuple = argparse.ArgumentParser()
        expected.add_argument("--foo" , type=_snake_case , required=_snake_case )
        expected.add_argument("--bar" , type=_snake_case , required=_snake_case )
        expected.add_argument("--baz" , type=_snake_case , required=_snake_case )
        expected.add_argument("--flag" , type=_snake_case , default=_snake_case , const=_snake_case , nargs="?" )
        self.argparsersEqual(_snake_case , _snake_case )
        a__ : List[str] = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        # NOTE(review): annotated parenthesized tuple target is invalid
        # syntax; upstream this is ``(example,) = parser.parse_args_into_dataclasses(...)``.
        ((a__ ), ) : Optional[Any] = parser.parse_args_into_dataclasses(_snake_case , look_for_args_file=_snake_case )
        self.assertFalse(example.flag )

    def _UpperCamelCase( self : Union[str, Any] ):
        # Fields with defaults become optional arguments carrying the default.
        a__ : Any = HfArgumentParser(_snake_case )
        a__ : Dict = argparse.ArgumentParser()
        expected.add_argument("--foo" , default=42 , type=_snake_case )
        expected.add_argument("--baz" , default="toto" , type=_snake_case , help="help message" )
        self.argparsersEqual(_snake_case , _snake_case )

    def _UpperCamelCase( self : Any ):
        # Boolean fields: truthy parsing plus a generated ``--no_*`` negation.
        a__ : Union[str, Any] = argparse.ArgumentParser()
        expected.add_argument("--foo" , type=_snake_case , default=_snake_case , const=_snake_case , nargs="?" )
        expected.add_argument("--baz" , type=_snake_case , default=_snake_case , const=_snake_case , nargs="?" )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz" , action="store_false" , default=_snake_case , dest="baz" )
        expected.add_argument("--opt" , type=_snake_case , default=_snake_case )
        a__ : int = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(_snake_case )
        for dataclass_type in dataclass_types:
            a__ : Dict = HfArgumentParser(_snake_case )
            self.argparsersEqual(_snake_case , _snake_case )
            a__ : Tuple = parser.parse_args([] )
            self.assertEqual(_snake_case , Namespace(foo=_snake_case , baz=_snake_case , opt=_snake_case ) )
            a__ : List[str] = parser.parse_args(["--foo", "--no_baz"] )
            self.assertEqual(_snake_case , Namespace(foo=_snake_case , baz=_snake_case , opt=_snake_case ) )
            a__ : int = parser.parse_args(["--foo", "--baz"] )
            self.assertEqual(_snake_case , Namespace(foo=_snake_case , baz=_snake_case , opt=_snake_case ) )
            a__ : Optional[int] = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
            self.assertEqual(_snake_case , Namespace(foo=_snake_case , baz=_snake_case , opt=_snake_case ) )
            a__ : Any = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
            self.assertEqual(_snake_case , Namespace(foo=_snake_case , baz=_snake_case , opt=_snake_case ) )

    def _UpperCamelCase( self : Optional[int] ):
        # Enum fields map to mixed-type choices with a custom converter.
        a__ : List[str] = HfArgumentParser(_snake_case )
        a__ : int = argparse.ArgumentParser()
        expected.add_argument(
            "--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
        self.argparsersEqual(_snake_case , _snake_case )
        a__ : Optional[int] = parser.parse_args([] )
        self.assertEqual(args.foo , "toto" )
        a__ : str = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        a__ : List[Any] = parser.parse_args(["--foo", "titi"] )
        self.assertEqual(args.foo , "titi" )
        a__ : List[str] = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        a__ : Dict = parser.parse_args(["--foo", "42"] )
        self.assertEqual(args.foo , 42 )
        a__ : Optional[Any] = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )

    def _UpperCamelCase( self : Union[str, Any] ):
        # ``Literal`` annotations behave like enums (note the tuple choices).
        @dataclass
        class A__ :
            """Inline fixture with a Literal-typed field (name mangled)."""
            _lowercase = 'toto'

        a__ : List[Any] = HfArgumentParser(_snake_case )
        a__ : List[Any] = argparse.ArgumentParser()
        expected.add_argument(
            "--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
        self.argparsersEqual(_snake_case , _snake_case )
        a__ : Dict = parser.parse_args([] )
        self.assertEqual(args.foo , "toto" )
        a__ : Union[str, Any] = parser.parse_args(["--foo", "titi"] )
        self.assertEqual(args.foo , "titi" )
        a__ : str = parser.parse_args(["--foo", "42"] )
        self.assertEqual(args.foo , 42 )

    def _UpperCamelCase( self : str ):
        # List fields map to ``nargs="+"`` arguments with element types.
        a__ : Tuple = HfArgumentParser(_snake_case )
        a__ : Optional[int] = argparse.ArgumentParser()
        expected.add_argument("--foo_int" , nargs="+" , default=[] , type=_snake_case )
        expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=_snake_case )
        expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=_snake_case )
        expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=_snake_case )
        self.argparsersEqual(_snake_case , _snake_case )
        a__ : Optional[int] = parser.parse_args([] )
        self.assertEqual(
            _snake_case , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
        a__ : Optional[int] = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
        self.assertEqual(_snake_case , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )

    def _UpperCamelCase( self : Union[str, Any] ):
        # Optional fields default to None and parse when provided.
        a__ : Any = argparse.ArgumentParser()
        expected.add_argument("--foo" , default=_snake_case , type=_snake_case )
        expected.add_argument("--bar" , default=_snake_case , type=_snake_case , help="help message" )
        expected.add_argument("--baz" , default=_snake_case , type=_snake_case )
        expected.add_argument("--ces" , nargs="+" , default=[] , type=_snake_case )
        expected.add_argument("--des" , nargs="+" , default=[] , type=_snake_case )
        a__ : Tuple = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(_snake_case )
        for dataclass_type in dataclass_types:
            a__ : Optional[Any] = HfArgumentParser(_snake_case )
            self.argparsersEqual(_snake_case , _snake_case )
            a__ : Union[str, Any] = parser.parse_args([] )
            self.assertEqual(_snake_case , Namespace(foo=_snake_case , bar=_snake_case , baz=_snake_case , ces=[] , des=[] ) )
            a__ : Dict = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
            self.assertEqual(_snake_case , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )

    def _UpperCamelCase( self : Any ):
        # Fields without defaults become required arguments.
        a__ : Optional[int] = HfArgumentParser(_snake_case )
        a__ : List[str] = argparse.ArgumentParser()
        expected.add_argument("--required_list" , nargs="+" , type=_snake_case , required=_snake_case )
        expected.add_argument("--required_str" , type=_snake_case , required=_snake_case )
        expected.add_argument(
            "--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=_snake_case , )
        self.argparsersEqual(_snake_case , _snake_case )

    def _UpperCamelCase( self : Optional[int] ):
        # String (forward-reference) annotations resolve like real types.
        a__ : Union[str, Any] = HfArgumentParser(_snake_case )
        a__ : int = argparse.ArgumentParser()
        expected.add_argument("--foo" , type=_snake_case , required=_snake_case )
        expected.add_argument(
            "--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=_snake_case , )
        expected.add_argument("--opt" , type=_snake_case , default=_snake_case )
        expected.add_argument("--baz" , default="toto" , type=_snake_case , help="help message" )
        expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=_snake_case )
        self.argparsersEqual(_snake_case , _snake_case )

    def _UpperCamelCase( self : Tuple ):
        # ``parse_dict`` round-trips a plain dict into the dataclass.
        a__ : str = HfArgumentParser(_snake_case )
        a__ : Any = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        a__ : Optional[int] = parser.parse_dict(_snake_case )[0]
        a__ : Tuple = BasicExample(**_snake_case )
        self.assertEqual(_snake_case , _snake_case )

    def _UpperCamelCase( self : str ):
        # ``parse_dict`` raises on unknown keys unless extras are allowed.
        a__ : Optional[Any] = HfArgumentParser(_snake_case )
        a__ : Union[str, Any] = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }
        self.assertRaises(_snake_case , parser.parse_dict , _snake_case , allow_extra_keys=_snake_case )

    def _UpperCamelCase( self : List[Any] ):
        # Round-trip through a temporary JSON file.
        # NOTE(review): this JSON test calls ``parse_yaml_file``; upstream
        # uses ``parse_json_file`` — likely a copy/paste or rename defect.
        a__ : Optional[Any] = HfArgumentParser(_snake_case )
        a__ : List[str] = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            a__ : List[Any] = os.path.join(_snake_case , "temp_json" )
            os.mkdir(_snake_case )
            with open(temp_local_path + ".json" , "w+" ) as f:
                json.dump(_snake_case , _snake_case )
            a__ : List[str] = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
        a__ : int = BasicExample(**_snake_case )
        self.assertEqual(_snake_case , _snake_case )

    def _UpperCamelCase( self : Union[str, Any] ):
        # Round-trip through a temporary YAML file.
        a__ : Optional[int] = HfArgumentParser(_snake_case )
        a__ : int = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            a__ : str = os.path.join(_snake_case , "temp_yaml" )
            os.mkdir(_snake_case )
            with open(temp_local_path + ".yaml" , "w+" ) as f:
                yaml.dump(_snake_case , _snake_case )
            a__ : Optional[int] = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
        a__ : Dict = BasicExample(**_snake_case )
        self.assertEqual(_snake_case , _snake_case )

    def _UpperCamelCase( self : int ):
        # Smoke test: TrainingArguments can be wrapped without error.
        a__ : Tuple = HfArgumentParser(_snake_case )
        self.assertIsNotNone(_snake_case )
| 37 |
"""simple docstring"""
from __future__ import annotations
def A_ (__a ):
    """Return the combined resistance of resistors connected in parallel.

    Req = 1 / (1/R1 + 1/R2 + ...).

    Raises:
        ValueError: if any resistor is zero or negative (a zero value would
            divide by zero and a negative one is physically meaningless here).
    """
    first_sum = 0.00
    for index, resistor in enumerate(__a):
        if resistor <= 0:
            # Bug fix: the message was built but ValueError was raised with
            # the whole input list (``__a``) instead of the message.
            msg = f'Resistor at index {index} has a negative or zero value!'
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
    return 1 / first_sum
def A_ (__a ):
    """Return the combined resistance of resistors connected in series.

    Req = R1 + R2 + ...; an empty input yields 0.0.

    Raises:
        ValueError: if any resistor has a negative value.
    """
    sum_r = 0.00
    for index, resistor in enumerate(__a):
        if resistor < 0:
            # Bug fix: the message was built but ValueError was raised with
            # the whole input list (``__a``) instead of the message.
            msg = f'Resistor at index {index} has a negative value!'
            raise ValueError(msg)
        sum_r += resistor
    return sum_r
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 115 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( __SCREAMING_SNAKE_CASE ):
    """Image processor (ConvNeXt-style): resize with ``crop_pct`` below a
    384px shortest edge, optionally rescale and normalize, and pack the
    result into a ``BatchFeature``.

    NOTE(review): mangled by an automated rename — every parameter in each
    signature shares the single name ``lowerCamelCase__`` (duplicate
    parameter names are a syntax error) and each computed value is bound to
    the same local ``__lowercase`` instead of ``self.<attr>`` or a distinct
    local; restore from upstream before use.
    """
    UpperCamelCase_ : Dict = ['pixel_values']

    def __init__( self : int , lowerCamelCase__ : bool = True , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : float = None , lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[int, float] = 1 / 255 , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , **lowerCamelCase__ : List[Any] , ) -> None:
        """Store the default preprocessing configuration (see class NOTE)."""
        super().__init__(**lowerCamelCase__ )
        __lowercase = size if size is not None else {'''shortest_edge''': 384}
        __lowercase = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
        __lowercase = do_resize
        __lowercase = size
        # Default value set here for backwards compatibility where the value in config is None
        __lowercase = crop_pct if crop_pct is not None else 224 / 256
        __lowercase = resample
        __lowercase = do_rescale
        __lowercase = rescale_factor
        __lowercase = do_normalize
        __lowercase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        __lowercase = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def UpperCAmelCase_ ( self : Dict , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Dict[str, int] , lowerCamelCase__ : float , lowerCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : str , ) -> np.ndarray:
        """Resize: below a 384 shortest edge, shrink by ``crop_pct`` then
        center-crop to a square; at >= 384, warp directly to (edge, edge)."""
        __lowercase = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
        if "shortest_edge" not in size:
            raise ValueError(f'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
        __lowercase = size['''shortest_edge''']
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            __lowercase = int(shortest_edge / crop_pct )
            __lowercase = get_resize_output_image_size(lowerCamelCase__ , size=lowerCamelCase__ , default_to_square=lowerCamelCase__ )
            __lowercase = resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=lowerCamelCase__ , size=(shortest_edge, shortest_edge) , data_format=lowerCamelCase__ , **lowerCamelCase__ )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                lowerCamelCase__ , size=(shortest_edge, shortest_edge) , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )

    def UpperCAmelCase_ ( self : Tuple , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Union[int, float] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Any , ) -> List[str]:
        """Scale pixel values by a factor (e.g. 1/255)."""
        return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )

    def UpperCAmelCase_ ( self : str , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Union[float, List[float]] , lowerCamelCase__ : Union[float, List[float]] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Optional[int] , ) -> np.ndarray:
        """Normalize with per-channel mean and std."""
        return normalize(lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )

    def UpperCAmelCase_ ( self : Any , lowerCamelCase__ : ImageInput , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : float = None , lowerCamelCase__ : PILImageResampling = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : float = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , lowerCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **lowerCamelCase__ : List[Any] , ) -> PIL.Image.Image:
        """Full preprocessing pipeline: validate, resize, rescale, normalize,
        and return a ``BatchFeature`` of pixel values."""
        __lowercase = do_resize if do_resize is not None else self.do_resize
        __lowercase = crop_pct if crop_pct is not None else self.crop_pct
        __lowercase = resample if resample is not None else self.resample
        __lowercase = do_rescale if do_rescale is not None else self.do_rescale
        __lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
        __lowercase = do_normalize if do_normalize is not None else self.do_normalize
        __lowercase = image_mean if image_mean is not None else self.image_mean
        __lowercase = image_std if image_std is not None else self.image_std
        __lowercase = size if size is not None else self.size
        __lowercase = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
        __lowercase = make_list_of_images(lowerCamelCase__ )
        if not valid_images(lowerCamelCase__ ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        # NOTE(review): precedence — this reads as
        # ``(do_resize and size is None) or (resample is None)``; upstream
        # intends ``do_resize and (size is None or resample is None)``.
        # Confirm and parenthesize.
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError('''crop_pct must be specified if size < 384.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        __lowercase = [to_numpy_array(lowerCamelCase__ ) for image in images]
        if do_resize:
            __lowercase = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , crop_pct=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images]
        if do_rescale:
            __lowercase = [self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images]
        if do_normalize:
            __lowercase = [self.normalize(image=lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ ) for image in images]
        __lowercase = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images]
        __lowercase = {'''pixel_values''': images}
        return BatchFeature(data=lowerCamelCase__ , tensor_type=lowerCamelCase__ )
| 362 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import scaffolding for the M2M100 model (standard transformers pattern).
# Bug fixes: the import-structure dict was bound to a throwaway name while
# `_LazyModule` was called with the then-undefined `_import_structure`; the
# torch model list was never registered under "modeling_m2m_100"; and the
# lazy module was not installed into `sys.modules`.
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is unavailable: expose only config/tokenizer symbols.
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    # NOTE(review): the submodule/symbol names below look mangled
    # (``configuration_mam_aaa`` / ``MaMaaaConfig``); confirm they match the
    # actual package layout (upstream: ``configuration_m2m_100`` etc.).
    from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
    from .tokenization_mam_aaa import MaMaaaTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mam_aaa import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaMaaaForConditionalGeneration,
            MaMaaaModel,
            MaMaaaPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 362 | 1 |
import numpy as np
def lowerCamelCase__ ( _a):
    """Logistic sigmoid, 1 / (1 + e^(-x)), applied element-wise.

    Bug fix: the body referenced the undefined name ``vector`` instead of
    the parameter ``_a``.
    """
    return 1 / (1 + np.exp(-_a))
def lowerCamelCase__ ( _a):
    """SiLU / swish activation, x * sigmoid(x), applied element-wise.

    Bug fix: the body referenced the undefined names ``vector`` and
    ``sigmoid``; the sigmoid is now computed inline so the function is
    self-contained.
    """
    return _a * (1 / (1 + np.exp(-_a)))
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
from __future__ import annotations
import math
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Dict , a : int ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = size
# approximate the overall size of segment tree with given value
SCREAMING_SNAKE_CASE : Any = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
SCREAMING_SNAKE_CASE : Union[str, Any] = [0 for i in range(0 , 4 * size )]
SCREAMING_SNAKE_CASE : Any = [0 for i in range(0 , 4 * size )] # flag for lazy update
def __UpperCamelCase ( self : Tuple , a : int ) -> int:
"""simple docstring"""
return idx * 2
def __UpperCamelCase ( self : str , a : int ) -> int:
"""simple docstring"""
return idx * 2 + 1
def __UpperCamelCase ( self : int , a : int , a : int , a : int , a : list[int] ) -> None:
"""simple docstring"""
if left_element == right_element:
SCREAMING_SNAKE_CASE : int = a[left_element - 1]
else:
SCREAMING_SNAKE_CASE : Optional[int] = (left_element + right_element) // 2
self.build(self.left(a ) , a , a , a )
self.build(self.right(a ) , mid + 1 , a , a )
SCREAMING_SNAKE_CASE : List[Any] = max(
self.segment_tree[self.left(a )] , self.segment_tree[self.right(a )] )
def __UpperCamelCase ( self : Optional[Any] , a : int , a : int , a : int , a : int , a : int , a : int ) -> bool:
"""simple docstring"""
if self.flag[idx] is True:
SCREAMING_SNAKE_CASE : Any = self.lazy[idx]
SCREAMING_SNAKE_CASE : List[str] = False
if left_element != right_element:
SCREAMING_SNAKE_CASE : Optional[Any] = self.lazy[idx]
SCREAMING_SNAKE_CASE : int = self.lazy[idx]
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : List[Any] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
SCREAMING_SNAKE_CASE : Optional[Any] = val
if left_element != right_element:
SCREAMING_SNAKE_CASE : str = val
SCREAMING_SNAKE_CASE : str = val
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : Optional[Any] = True
return True
SCREAMING_SNAKE_CASE : int = (left_element + right_element) // 2
self.update(self.left(a ) , a , a , a , a , a )
self.update(self.right(a ) , mid + 1 , a , a , a , a )
SCREAMING_SNAKE_CASE : Optional[int] = max(
self.segment_tree[self.left(a )] , self.segment_tree[self.right(a )] )
return True
def __UpperCamelCase ( self : Dict , a : int , a : int , a : int , a : int , a : int ) -> int | float:
"""simple docstring"""
if self.flag[idx] is True:
SCREAMING_SNAKE_CASE : int = self.lazy[idx]
SCREAMING_SNAKE_CASE : List[Any] = False
if left_element != right_element:
SCREAMING_SNAKE_CASE : Optional[Any] = self.lazy[idx]
SCREAMING_SNAKE_CASE : Optional[Any] = self.lazy[idx]
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
SCREAMING_SNAKE_CASE : Dict = (left_element + right_element) // 2
SCREAMING_SNAKE_CASE : Tuple = self.query(self.left(a ) , a , a , a , a )
SCREAMING_SNAKE_CASE : Tuple = self.query(self.right(a ) , mid + 1 , a , a , a )
return max(a , a )
def __str__( self : str ) -> str:
"""simple docstring"""
return str([self.query(1 , 1 , self.size , a , a ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
    # Demo of the lazy segment tree.
    # Bug fixes: the obfuscated script bound every value to ``a_`` yet used
    # the undefined names ``SegmentTree``, ``A``, ``size`` and ``segt``;
    # distinct names are restored (the class here is ``_UpperCamelCase``).
    a_ = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = _UpperCamelCase(size)
    segt.build(1, 1, size, a_)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
lowerCAmelCase : str = logging.get_logger(__name__)

# Pretrained config archive map: model id -> hosted config.json URL.
lowerCAmelCase : Any = {
    'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class _A ( __magic_name__):
    """Configuration for the AltCLIP text encoder (XLM-R-style
    hyperparameters plus a ``project_dim`` projection size).

    NOTE(review): mangled by an automated rename — the ``__init__``
    parameters all share the name ``_SCREAMING_SNAKE_CASE`` (a syntax
    error) and every value is bound to the same local instead of an
    instance attribute (upstream: ``self.vocab_size = vocab_size`` etc.),
    so no argument is retained; restore from upstream before use.
    """
    SCREAMING_SNAKE_CASE : List[Any] = '''altclip_text_model'''

    def __init__( self , _SCREAMING_SNAKE_CASE=25_0002 , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=24 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=4096 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=514 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-05 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=768 , **_SCREAMING_SNAKE_CASE , ):
        """Store text-model hyperparameters (see class NOTE)."""
        super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
        SCREAMING_SNAKE_CASE_ : Dict = vocab_size
        SCREAMING_SNAKE_CASE_ : Dict = hidden_size
        SCREAMING_SNAKE_CASE_ : Tuple = num_hidden_layers
        SCREAMING_SNAKE_CASE_ : Optional[Any] = num_attention_heads
        SCREAMING_SNAKE_CASE_ : Any = hidden_act
        SCREAMING_SNAKE_CASE_ : Optional[Any] = intermediate_size
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_dropout_prob
        SCREAMING_SNAKE_CASE_ : int = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE_ : List[Any] = max_position_embeddings
        SCREAMING_SNAKE_CASE_ : Any = type_vocab_size
        SCREAMING_SNAKE_CASE_ : List[str] = initializer_range
        SCREAMING_SNAKE_CASE_ : List[str] = initializer_factor
        SCREAMING_SNAKE_CASE_ : Dict = layer_norm_eps
        SCREAMING_SNAKE_CASE_ : Any = position_embedding_type
        SCREAMING_SNAKE_CASE_ : Optional[Any] = use_cache
        SCREAMING_SNAKE_CASE_ : Dict = project_dim
class _A ( __magic_name__):
    """Configuration for the AltCLIP vision (ViT-style) encoder.

    NOTE(review): mangled by an automated rename — duplicated parameter
    names, values bound to a throwaway local instead of instance
    attributes, and ``from_pretrained`` drops the ``(config_dict, kwargs)``
    tuple returned by ``get_config_dict`` yet later reads ``config_dict``;
    restore from upstream before use.
    """
    SCREAMING_SNAKE_CASE : int = '''altclip_vision_model'''

    def __init__( self , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=224 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE="quick_gelu" , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1.0 , **_SCREAMING_SNAKE_CASE , ):
        """Store vision-model hyperparameters (see class NOTE)."""
        super().__init__(**_SCREAMING_SNAKE_CASE )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_size
        SCREAMING_SNAKE_CASE_ : List[str] = intermediate_size
        SCREAMING_SNAKE_CASE_ : List[Any] = projection_dim
        SCREAMING_SNAKE_CASE_ : Dict = num_hidden_layers
        SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
        SCREAMING_SNAKE_CASE_ : int = num_channels
        SCREAMING_SNAKE_CASE_ : Tuple = patch_size
        SCREAMING_SNAKE_CASE_ : Dict = image_size
        SCREAMING_SNAKE_CASE_ : int = initializer_range
        SCREAMING_SNAKE_CASE_ : List[str] = initializer_factor
        SCREAMING_SNAKE_CASE_ : List[str] = attention_dropout
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm_eps
        SCREAMING_SNAKE_CASE_ : str = hidden_act

    @classmethod
    def UpperCAmelCase ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
        """Load a vision config, unwrapping it from a full AltCLIP config
        when the loaded dict's model_type is ``altclip``."""
        cls._set_token_in_kwargs(_SCREAMING_SNAKE_CASE )
        SCREAMING_SNAKE_CASE_ : Any = cls.get_config_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get('model_type' ) == "altclip":
            SCREAMING_SNAKE_CASE_ : int = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class _A ( __magic_name__):
    """Composite AltCLIP configuration holding a text and a vision
    sub-config, plus the projection size and logit-scale initializer.

    NOTE(review): mangled by an automated rename — computed values are
    bound to the local ``SCREAMING_SNAKE_CASE_`` while later statements
    read the intended names (``text_config_dict``, ``_text_config_dict``,
    ``text_config``, ...); restore from upstream before use.
    """
    SCREAMING_SNAKE_CASE : Any = '''altclip'''
    SCREAMING_SNAKE_CASE : Optional[int] = True

    def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=2.6592 , **_SCREAMING_SNAKE_CASE ):
        """Merge optional ``*_config_dict`` overrides into the sub-configs,
        warning about conflicting keys (see class NOTE)."""
        SCREAMING_SNAKE_CASE_ : Any = kwargs.pop('text_config_dict' , _SCREAMING_SNAKE_CASE )
        SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('vision_config_dict' , _SCREAMING_SNAKE_CASE )
        super().__init__(**_SCREAMING_SNAKE_CASE )
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                SCREAMING_SNAKE_CASE_ : Tuple = {}
            # This is the complete result when using `text_config_dict`.
            SCREAMING_SNAKE_CASE_ : Tuple = AltCLIPTextConfig(**_SCREAMING_SNAKE_CASE ).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        SCREAMING_SNAKE_CASE_ : List[str] = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f"The value `text_config_dict[\"{key}\"]` will be used instead."
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        SCREAMING_SNAKE_CASE_ : Dict = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f"value `text_config[\"{key}\"]` will be overriden."
                        )
                    logger.warning(_SCREAMING_SNAKE_CASE )
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict )
        if vision_config_dict is not None:
            if vision_config is None:
                SCREAMING_SNAKE_CASE_ : Dict = {}
            # This is the complete result when using `vision_config_dict`.
            SCREAMING_SNAKE_CASE_ : Dict = AltCLIPVisionConfig(**_SCREAMING_SNAKE_CASE ).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                SCREAMING_SNAKE_CASE_ : Optional[int] = {
                    str(_SCREAMING_SNAKE_CASE ): value for key, value in _vision_config_dict['id2label'].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        SCREAMING_SNAKE_CASE_ : int = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f"values. The value `vision_config_dict[\"{key}\"]` will be used instead."
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        SCREAMING_SNAKE_CASE_ : Dict = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f"The value `vision_config[\"{key}\"]` will be overriden."
                        )
                    logger.warning(_SCREAMING_SNAKE_CASE )
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict )
        if text_config is None:
            SCREAMING_SNAKE_CASE_ : Optional[int] = {}
            logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.' )
        if vision_config is None:
            SCREAMING_SNAKE_CASE_ : Any = {}
            logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.' )
        SCREAMING_SNAKE_CASE_ : List[Any] = AltCLIPTextConfig(**_SCREAMING_SNAKE_CASE )
        SCREAMING_SNAKE_CASE_ : Optional[int] = AltCLIPVisionConfig(**_SCREAMING_SNAKE_CASE )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = projection_dim
        SCREAMING_SNAKE_CASE_ : Dict = logit_scale_init_value
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1.0

    @classmethod
    def UpperCAmelCase ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
        """Alternate constructor building the composite from two sub-configs."""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_SCREAMING_SNAKE_CASE )

    def UpperCAmelCase ( self ):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        SCREAMING_SNAKE_CASE_ : List[str] = copy.deepcopy(self.__dict__ )
        SCREAMING_SNAKE_CASE_ : Tuple = self.text_config.to_dict()
        SCREAMING_SNAKE_CASE_ : List[str] = self.vision_config.to_dict()
        SCREAMING_SNAKE_CASE_ : Tuple = self.__class__.model_type
        return output
| 706 |
# Morse code table (letters, digits, common punctuation); ' ' maps to '/' as
# the conventional word separator.
# NOTE(review): the table is bound to the mangled name ``lowerCAmelCase`` but
# read back below (and by the encode/decode helpers) as ``MORSE_CODE_DICT``;
# confirm the intended constant name upstream.
lowerCAmelCase : Tuple = {
    'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
    'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
    'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
    'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
    '2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
    '8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
    ':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
    '?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
    '(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
# Inverse table (Morse code -> character) used for decoding.
lowerCAmelCase : Optional[int] = {value: key for key, value in MORSE_CODE_DICT.items()}
def A_ ( a ):
"""simple docstring"""
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def A_ ( a ):
"""simple docstring"""
return "".join(REVERSE_DICT[char] for char in message.split() )
def A_ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = 'Morse code here!'
print(a )
SCREAMING_SNAKE_CASE_ : Optional[Any] = encrypt(a )
print(a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = decrypt(a )
print(a )
if __name__ == "__main__":
main()
| 353 | 0 |
import operator as op

# File-name constants used when saving/loading accelerator state.
SCALER_NAME = "scaler.pt"
MODEL_NAME = "pytorch_model"
RNG_STATE_NAME = "random_states"
OPTIMIZER_NAME = "optimizer"
SCHEDULER_NAME = "scheduler"
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"

# SageMaker launcher requirements.
SAGEMAKER_PYTORCH_VERSION = "1.10.2"
SAGEMAKER_PYTHON_VERSION = "py38"
SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0"
SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]

# Fully Sharded Data Parallel (FSDP) configuration choices.
FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
FSDP_PYTORCH_VERSION = "2.0.1"

DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"]
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]

# String comparison operators mapped to their `operator` implementations.
STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
    "nnodes",
    "nproc_per_node",
    "rdzv_backend",
    "rdzv_endpoint",
    "rdzv_id",
    "rdzv_conf",
    "standalone",
    "max_restarts",
    "monitor_interval",
    "start_method",
    "role",
    "module",
    "m",
    "no_python",
    "run_path",
    "log_dir",
    "r",
    "redirects",
    "t",
    "tee",
    "node_rank",
    "master_addr",
    "master_port",
]

# Distributed-type names that imply CUDA devices.
CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
# Distributed-type names supported on XPU devices.
# NOTE(review): original constant name lost in the dump — verify against accelerate.utils.constants.
XPU_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """``yaml.SafeLoader`` subclass that rejects mappings with duplicate keys."""

    def _check_no_duplicates_on_constructed_node(self, node):
        # Keys constructed from sequences are lists (unhashable): normalize to tuples.
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"""Got duplicate yaml keys: {duplicate_keys}""")

    def construct_mapping(self, node, deep=False):
        # Build the mapping first, then validate key uniqueness.
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split a README into its YAML front matter and body.

    Returns ``(yaml_block, body)`` when the content starts with a
    ``---``-delimited block, otherwise ``(None, full_content)``.
    """
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        # Index of the closing "---" (offset by 1 because we searched from line 1).
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    """Dict-like container for a dataset README's YAML metadata block."""

    # Fields whose YAML spelling uses dashes instead of underscores.
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path):
        """Load the metadata from the YAML front matter of a README file."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path):
        """Write this metadata back into the README at *path*, preserving its body."""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(full_content)

    def _to_readme(self, readme_content=None) -> str:
        """Render the full README text with this metadata as YAML front matter."""
        if readme_content is not None:
            # Drop any existing front matter and keep only the body.
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str):
        """Parse a YAML string (rejecting duplicate keys) into metadata."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        """Dump the metadata as YAML, restoring dashed spellings."""
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
# Known task-category ids mapped to their (currently empty) sub-task lists.
# NOTE(review): the original variable name was lost by the renaming pass
# (`UpperCAmelCase` is rebound again below); nothing in this chunk reads it —
# confirm the intended name against the upstream module before relying on it.
UpperCAmelCase : List[Any] = {
    '''image-classification''': [],
    '''translation''': [],
    '''image-segmentation''': [],
    '''fill-mask''': [],
    '''automatic-speech-recognition''': [],
    '''token-classification''': [],
    '''sentence-similarity''': [],
    '''audio-classification''': [],
    '''question-answering''': [],
    '''summarization''': [],
    '''zero-shot-classification''': [],
    '''table-to-text''': [],
    '''feature-extraction''': [],
    '''other''': [],
    '''multiple-choice''': [],
    '''text-classification''': [],
    '''text-to-image''': [],
    '''text2text-generation''': [],
    '''zero-shot-image-classification''': [],
    '''tabular-classification''': [],
    '''tabular-regression''': [],
    '''image-to-image''': [],
    '''tabular-to-text''': [],
    '''unconditional-image-generation''': [],
    '''text-retrieval''': [],
    '''text-to-speech''': [],
    '''object-detection''': [],
    '''audio-to-audio''': [],
    '''text-generation''': [],
    '''conversational''': [],
    '''table-question-answering''': [],
    '''visual-question-answering''': [],
    '''image-to-text''': [],
    '''reinforcement-learning''': [],
    '''voice-activity-detection''': [],
    '''time-series-forecasting''': [],
    '''document-question-answering''': [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    # Simple CLI: parse, print, and round-trip the YAML metadata of a README.md.
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)

# Root logger; handlers are attached below and inside individual tests.
logger = logging.getLogger()
def get_setup_file():
    """Return the value of the ``-f`` command-line flag (test-runner plumbing)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    """Load ``all_results.json`` from *output_dir*.

    Raises:
        ValueError: if the results file does not exist.
    """
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    # Bug fix: check and open the joined file path, not the directory itself.
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
def is_cuda_and_apex_available():
    """True when running on CUDA and NVIDIA apex is importable (enables --fp16 paths)."""
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
# Echo all log records to stdout so pytest captures them.
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    """End-to-end smoke tests for the ``*_no_trainer`` example scripts, launched via ``accelerate``."""

    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16")
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16")
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
from sklearn.metrics import recall_score
import datasets
# Metric documentation consumed by the `add_start_docstrings` decorator and `_info` below.
_DESCRIPTION = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
_CITATION = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    """Recall metric backed by scikit-learn's ``recall_score``."""

    def _info(self):
        # Multilabel configs take sequences of labels per example; everything else takes scalars.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn"):
        # sklearn's signature is (y_true, y_pred): references first.
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        # `average=None` yields an array of per-class scores; keep it as-is.
        return {"recall": float(score) if score.size == 1 else score}
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    """Builds small ALBERT configs/inputs for the shared Flax model-test mixin."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) for a tiny model."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Repackage the inputs as the (config, inputs_dict) pair the mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Runs the shared Flax model-test suite over all ALBERT head classes."""

    # NOTE(review): FlaxAlbertForQuestionAnswering appears twice in the original tuple; kept as-is.
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Smoke-test that every head loads from the hub and runs a forward pass.
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    """Checks real albert-base-v2 outputs against recorded reference values."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))


# NOTE(review): this import belongs to the graph utilities below; it was fused
# onto the previous line by the dump and is re-emitted here.
import random
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a G(n, p) random graph as an adjacency-list dict.

    Each possible edge (i, j) with i < j is added independently with the given
    probability; for undirected graphs the reverse edge is added too.
    """
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than the probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i, either
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Return the complete graph on *vertices_number* vertices as an adjacency dict."""
    return {i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)}


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Optimize ``search_prob`` with simulated annealing.

    Args:
        search_prob: starting state; must expose ``x``, ``y``, ``score()`` and
            ``get_neighbors()`` (returning states of the same type).
        find_max: maximize the score when True, otherwise minimize acceptance.
        max_x/min_x/max_y/min_y: bounding box; out-of-bounds neighbors are skipped.
        visualization: plot score-per-iteration with matplotlib when True.
        start_temperate: initial temperature.
        rate_of_decrease: fractional temperature drop per iteration.
        threshold_temp: temperature at which the search stops.

    Returns:
        The best state encountered during the search.
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        # NOTE(review): best_state always tracks the *highest* score seen, even
        # when find_max=False — kept as-is; confirm this is the intended contract.
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
if __name__ == "__main__":
    # NOTE(review): ``SearchProblem`` is defined elsewhere in the original
    # module — confirm it is importable before running this demo.

    def test_f1(x, y):
        """Objective f(x, y) = x^2 + y^2 used by the demos below."""
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    def test_f2(x, y):
        """Objective f(x, y) = 3x^2 - 6y used by the demos below."""
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )
| 487 |
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    """Derive HF encoder/decoder configs from an original DonutModel.

    Args:
        model: the original ``DonutModel``; its ``config`` and decoder
            tokenizer provide the hyperparameters.

    Returns:
        Tuple ``(encoder_config, decoder_config)`` — a ``DonutSwinConfig``
        and an ``MBartConfig`` mirroring the original model.
    """
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        # vocab size comes from the original decoder's tokenizer
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config
def rename_key(name):
    """Map an original donut-python parameter name to its HF equivalent.

    Applies a sequence of substring substitutions (encoder/decoder prefixes,
    patch embeddings, attention/MLP sublayers, final layernorm) and returns
    the rewritten name unchanged when no rule matches.
    """
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            # HF nests the Swin stages one level deeper
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name
def convert_state_dict(orig_state_dict, model):
    """Rewrite an original Donut state dict so it loads into the HF model.

    Fused qkv projections are split into separate query/key/value tensors
    (sized from the HF model's attention heads); attention-mask buffers and
    the original encoder's final LayerNorm are dropped; all other keys are
    renamed via ``rename_key``.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # key looks like "encoder.model.layers.<L>.blocks.<B>.attn.qkv.{weight,bias}"
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original donut-python checkpoint to the HF format.

    Loads the original model, builds the equivalent HF
    ``VisionEncoderDecoderModel``, transfers the weights, verifies outputs
    on a sample scanned document, then optionally saves and/or pushes the
    converted model and processor.
    """
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    # verify patch embeddings
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    # NOTE(review): the original call passed three positional arguments to
    # original_model(...); confirm the third against donut-python's signature.
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""naver-clova-ix/donut-base-finetuned-docvqa""",
required=False,
type=str,
help="""Name of the original model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
required=False,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub.""",
)
A = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 487 | 1 |
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
# Module-level logger; read as ``logger`` by the benchmark arguments below.
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    """dataclass ``field`` whose default is a (mutable) list value."""
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BenchmarkArguments:
    """
    Base CLI arguments for the (deprecated) benchmarking utilities.

    NOTE(review): boolean/default values below were restored from the
    upstream ``transformers`` BenchmarkArguments — confirm against the
    pinned transformers version before relying on them.
    """

    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )

    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )

    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )

    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        # The benchmarking utilities are deprecated upstream; warn on creation.
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serialize this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        """Validated list of model identifiers; raises when none were given."""
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        """Whether measurements should run in a separate process (never on TPU)."""
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class UpperCamelCase__ ( unittest.TestCase ):
    """Tests for ``SamProcessor`` with the PyTorch backend.

    NOTE(review): this block appears machine-mangled — every method shares the
    name ``lowerCamelCase_`` (later defs shadow earlier ones) and local
    assignments all target ``SCREAMING_SNAKE_CASE`` while the reads use the
    original variable names (``processor``, ``masks``, ...). Code is left
    byte-identical; restore from the upstream transformers SamProcessor tests.
    """
    def lowerCamelCase_ ( self : Any ):
        '''setUp: save a fresh SamProcessor into a temporary directory.'''
        SCREAMING_SNAKE_CASE : Union[str, Any] = tempfile.mkdtemp()
        SCREAMING_SNAKE_CASE : List[Any] = SamImageProcessor()
        SCREAMING_SNAKE_CASE : Optional[Any] = SamProcessor(lowerCamelCase_ )
        processor.save_pretrained(self.tmpdirname )
    def lowerCamelCase_ ( self : Dict , **lowerCamelCase_ : List[str] ):
        '''Return the image processor reloaded from the temp dir via AutoProcessor.'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase_ ).image_processor
    def lowerCamelCase_ ( self : Dict ):
        '''tearDown: remove the temporary directory.'''
        shutil.rmtree(self.tmpdirname )
    def lowerCamelCase_ ( self : str ):
        '''Build a single random 30x400 RGB PIL image as test input.'''
        SCREAMING_SNAKE_CASE : Optional[int] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        SCREAMING_SNAKE_CASE : Optional[int] = [Image.fromarray(np.moveaxis(lowerCamelCase_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def lowerCamelCase_ ( self : Any ):
        '''Save then reload the processor with extra kwargs; configs must match.'''
        SCREAMING_SNAKE_CASE : List[Any] = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        SCREAMING_SNAKE_CASE : int = self.get_image_processor(do_normalize=lowerCamelCase_ , padding_value=1.0 )
        SCREAMING_SNAKE_CASE : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowerCamelCase_ , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowerCamelCase_ )
    def lowerCamelCase_ ( self : Tuple ):
        '''Processor output must match the bare image processor's output.'''
        SCREAMING_SNAKE_CASE : Optional[Any] = self.get_image_processor()
        SCREAMING_SNAKE_CASE : List[Any] = SamProcessor(image_processor=lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE : List[str] = image_processor(lowerCamelCase_ , return_tensors="""np""" )
        SCREAMING_SNAKE_CASE : Dict = processor(images=lowerCamelCase_ , return_tensors="""np""" )
        input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    @require_torch
    def lowerCamelCase_ ( self : int ):
        '''post_process_masks must resize to original sizes (torch/np inputs) and reject bad shapes.'''
        SCREAMING_SNAKE_CASE : List[str] = self.get_image_processor()
        SCREAMING_SNAKE_CASE : Optional[Any] = SamProcessor(image_processor=lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Any = [torch.ones((1, 3, 5, 5) )]
        SCREAMING_SNAKE_CASE : int = [[17_64, 26_46]]
        SCREAMING_SNAKE_CASE : Any = [[6_83, 10_24]]
        SCREAMING_SNAKE_CASE : str = processor.post_process_masks(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        SCREAMING_SNAKE_CASE : str = processor.post_process_masks(
            lowerCamelCase_ , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        # should also work with np
        SCREAMING_SNAKE_CASE : Optional[int] = [np.ones((1, 3, 5, 5) )]
        SCREAMING_SNAKE_CASE : Optional[int] = processor.post_process_masks(lowerCamelCase_ , np.array(lowerCamelCase_ ) , np.array(lowerCamelCase_ ) )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        SCREAMING_SNAKE_CASE : Tuple = [[1, 0], [0, 1]]
        with self.assertRaises(lowerCamelCase_ ):
            SCREAMING_SNAKE_CASE : Tuple = processor.post_process_masks(lowerCamelCase_ , np.array(lowerCamelCase_ ) , np.array(lowerCamelCase_ ) )
@require_vision
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
    """Tests for ``SamProcessor`` with the TensorFlow backend.

    NOTE(review): machine-mangled like the class above — method names are all
    ``lowerCamelCase_`` and local assignments target ``SCREAMING_SNAKE_CASE``
    while reads use the original names. Code left byte-identical; restore
    from the upstream transformers SamProcessor tests.
    """
    def lowerCamelCase_ ( self : Tuple ):
        '''setUp: save a fresh SamProcessor into a temporary directory.'''
        SCREAMING_SNAKE_CASE : Union[str, Any] = tempfile.mkdtemp()
        SCREAMING_SNAKE_CASE : Optional[Any] = SamImageProcessor()
        SCREAMING_SNAKE_CASE : Optional[int] = SamProcessor(lowerCamelCase_ )
        processor.save_pretrained(self.tmpdirname )
    def lowerCamelCase_ ( self : Any , **lowerCamelCase_ : str ):
        '''Return the image processor reloaded from the temp dir via AutoProcessor.'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase_ ).image_processor
    def lowerCamelCase_ ( self : List[Any] ):
        '''tearDown: remove the temporary directory.'''
        shutil.rmtree(self.tmpdirname )
    def lowerCamelCase_ ( self : Union[str, Any] ):
        '''Build a single random 30x400 RGB PIL image as test input.'''
        SCREAMING_SNAKE_CASE : Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        SCREAMING_SNAKE_CASE : str = [Image.fromarray(np.moveaxis(lowerCamelCase_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def lowerCamelCase_ ( self : List[Any] ):
        '''Save then reload the processor with extra kwargs; configs must match.'''
        SCREAMING_SNAKE_CASE : Union[str, Any] = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        SCREAMING_SNAKE_CASE : Optional[Any] = self.get_image_processor(do_normalize=lowerCamelCase_ , padding_value=1.0 )
        SCREAMING_SNAKE_CASE : List[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowerCamelCase_ , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowerCamelCase_ )
    def lowerCamelCase_ ( self : Optional[Any] ):
        '''Processor output must match the bare image processor's output.'''
        SCREAMING_SNAKE_CASE : str = self.get_image_processor()
        SCREAMING_SNAKE_CASE : Dict = SamProcessor(image_processor=lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : int = self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors="""np""" )
        SCREAMING_SNAKE_CASE : List[Any] = processor(images=lowerCamelCase_ , return_tensors="""np""" )
        input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    @require_tf
    def lowerCamelCase_ ( self : Dict ):
        '''post_process_masks must resize to original sizes (tf/np inputs) and reject bad shapes.'''
        SCREAMING_SNAKE_CASE : Dict = self.get_image_processor()
        SCREAMING_SNAKE_CASE : Tuple = SamProcessor(image_processor=lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Union[str, Any] = [tf.ones((1, 3, 5, 5) )]
        SCREAMING_SNAKE_CASE : Dict = [[17_64, 26_46]]
        SCREAMING_SNAKE_CASE : Any = [[6_83, 10_24]]
        SCREAMING_SNAKE_CASE : str = processor.post_process_masks(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , return_tensors="""tf""" )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        SCREAMING_SNAKE_CASE : Optional[int] = processor.post_process_masks(
            lowerCamelCase_ , tf.convert_to_tensor(lowerCamelCase_ ) , tf.convert_to_tensor(lowerCamelCase_ ) , return_tensors="""tf""" , )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        # should also work with np
        SCREAMING_SNAKE_CASE : Optional[int] = [np.ones((1, 3, 5, 5) )]
        SCREAMING_SNAKE_CASE : List[str] = processor.post_process_masks(
            lowerCamelCase_ , np.array(lowerCamelCase_ ) , np.array(lowerCamelCase_ ) , return_tensors="""tf""" )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        SCREAMING_SNAKE_CASE : str = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError ):
            SCREAMING_SNAKE_CASE : Tuple = processor.post_process_masks(
                lowerCamelCase_ , np.array(lowerCamelCase_ ) , np.array(lowerCamelCase_ ) , return_tensors="""tf""" )
@require_vision
@require_torchvision
class UpperCamelCase__ ( unittest.TestCase ):
    """Cross-framework (PyTorch vs TensorFlow) equivalence tests for SamProcessor.

    NOTE(review): machine-mangled like the classes above — method names are
    all ``lowerCamelCase_`` and local assignments target
    ``SCREAMING_SNAKE_CASE`` while reads use the original names. Code left
    byte-identical; restore from the upstream transformers tests.
    """
    def lowerCamelCase_ ( self : Tuple ):
        '''setUp: save a fresh SamProcessor into a temporary directory.'''
        SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp()
        SCREAMING_SNAKE_CASE : int = SamImageProcessor()
        SCREAMING_SNAKE_CASE : str = SamProcessor(lowerCamelCase_ )
        processor.save_pretrained(self.tmpdirname )
    def lowerCamelCase_ ( self : Tuple , **lowerCamelCase_ : List[Any] ):
        '''Return the image processor reloaded from the temp dir via AutoProcessor.'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase_ ).image_processor
    def lowerCamelCase_ ( self : int ):
        '''tearDown: remove the temporary directory.'''
        shutil.rmtree(self.tmpdirname )
    def lowerCamelCase_ ( self : Dict ):
        '''Build a single random 30x400 RGB PIL image as test input.'''
        SCREAMING_SNAKE_CASE : Optional[int] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        SCREAMING_SNAKE_CASE : Dict = [Image.fromarray(np.moveaxis(lowerCamelCase_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    @is_pt_tf_cross_test
    def lowerCamelCase_ ( self : Optional[Any] ):
        '''post_process_masks must agree between the tf and pt code paths.'''
        SCREAMING_SNAKE_CASE : str = self.get_image_processor()
        SCREAMING_SNAKE_CASE : Dict = SamProcessor(image_processor=lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Any = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
        SCREAMING_SNAKE_CASE : str = [tf.convert_to_tensor(lowerCamelCase_ )]
        SCREAMING_SNAKE_CASE : Union[str, Any] = [torch.tensor(lowerCamelCase_ )]
        SCREAMING_SNAKE_CASE : Tuple = [[17_64, 26_46]]
        SCREAMING_SNAKE_CASE : Optional[Any] = [[6_83, 10_24]]
        SCREAMING_SNAKE_CASE : Tuple = processor.post_process_masks(
            lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , return_tensors="""tf""" )
        SCREAMING_SNAKE_CASE : Any = processor.post_process_masks(
            lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , return_tensors="""pt""" )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
    @is_pt_tf_cross_test
    def lowerCamelCase_ ( self : Tuple ):
        '''pixel_values must agree across pt/tf tensors and processor vs image processor.'''
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor()
        SCREAMING_SNAKE_CASE : Union[str, Any] = SamProcessor(image_processor=lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Any = self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE : int = image_processor(lowerCamelCase_ , return_tensors="""pt""" )["""pixel_values"""].numpy()
        SCREAMING_SNAKE_CASE : Tuple = processor(images=lowerCamelCase_ , return_tensors="""pt""" )["""pixel_values"""].numpy()
        SCREAMING_SNAKE_CASE : Dict = image_processor(lowerCamelCase_ , return_tensors="""tf""" )["""pixel_values"""].numpy()
        SCREAMING_SNAKE_CASE : Dict = processor(images=lowerCamelCase_ , return_tensors="""tf""" )["""pixel_values"""].numpy()
        self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) )
        self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) )
        self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) )
| 379 | 0 |
"""simple docstring"""
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter in direct form I.

    Coefficients default to an identity filter (a = b = [1, 0, ..., 0]),
    so ``process`` passes samples through unchanged until
    ``set_coefficients`` is called.
    """

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        """Set filter coefficients.

        A leading ``a_0 = 1.0`` is prepended when ``a_coeffs`` has only
        ``order`` entries. Raises ValueError when either list does not end
        up with ``order + 1`` entries.
        """
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )

        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """Feed one input sample through the filter and return the output sample."""
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        # Shift histories right by one and record the newest sample/output.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result

        return result
# flake8: noqa
# Lint as: python3

# Names re-exported as the public API of this utils package.
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
from __future__ import annotations
__lowerCamelCase : Optional[int] = [True] * 1_00_00_01
__lowerCamelCase : Optional[Any] = 2
while i * i <= 1_00_00_00:
if seive[i]:
for j in range(i * i, 1_00_00_01, i):
__lowerCamelCase : List[Any] = False
i += 1
def A__ ( _a : int ):
'''simple docstring'''
return seive[n]
def A__ ( _a : int ):
'''simple docstring'''
return any(digit in """02468""" for digit in str(snake_case__ ) )
def A__ ( _a : int = 1000000 ):
'''simple docstring'''
snake_case__ : Any =[2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(snake_case__ ) and not contains_an_even_digit(snake_case__ ):
snake_case__ : Union[str, Any] =str(snake_case__ )
snake_case__ : Any =[int(str_num[j:] + str_num[:j] ) for j in range(len(snake_case__ ) )]
if all(is_prime(snake_case__ ) for i in list_nums ):
result.append(snake_case__ )
return result
def A__ ( ):
'''simple docstring'''
return len(find_circular_primes() )
if __name__ == "__main__":
print(F"{len(find_circular_primes()) = }")
| 385 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: maps submodule name -> public names it provides.
_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lxmert"] = [
        "LxmertEncoder",
        "LxmertForPreTraining",
        "LxmertForQuestionAnswering",
        "LxmertModel",
        "LxmertPreTrainedModel",
        "LxmertVisualFeatureEncoder",
        "LxmertXLayer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_lxmert"] = [
        "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLxmertForPreTraining",
        "TFLxmertMainLayer",
        "TFLxmertModel",
        "TFLxmertPreTrainedModel",
        "TFLxmertVisualFeatureEncoder",
    ]


if TYPE_CHECKING:
    from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
    from .tokenization_lxmert import LxmertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_lxmert_fast import LxmertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lxmert import (
            LxmertEncoder,
            LxmertForPreTraining,
            LxmertForQuestionAnswering,
            LxmertModel,
            LxmertPreTrainedModel,
            LxmertVisualFeatureEncoder,
            LxmertXLayer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_lxmert import (
            TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLxmertForPreTraining,
            TFLxmertMainLayer,
            TFLxmertModel,
            TFLxmertPreTrainedModel,
            TFLxmertVisualFeatureEncoder,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 313 | 0 |
"""Print the modified .py files (since the fork point with main) that live
under the directories passed as command-line arguments."""
import re
import subprocess
import sys


# SHA of the merge base between main and the current HEAD.
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
# Files changed since the fork point; --diff-filter=d drops deleted files.
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

# Directories of interest are passed on the command line.
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 721 |
'''simple docstring'''
import itertools
import math
def lowercase_ ( _lowercase ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_lowercase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase_ ( ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = 2
while True:
if is_prime(_lowercase ):
yield num
num += 1
def lowercase_(_lowercase=10_001) -> int:
    """Return the *_lowercase*-th prime number (1-indexed) — Project Euler 7.

    Fixes: the original sliced an undefined ``prime_generator()`` starting at
    an undefined ``nth - 1``; the generator is now inlined so the function is
    self-contained.
    """

    def _primes():
        # Unbounded prime generator via 6k±1 trial division.
        num = 2
        while True:
            if num < 2 or (num > 3 and (num % 2 == 0 or num % 3 == 0)):
                is_prime = False
            else:
                is_prime = True
                for i in range(5, int(math.sqrt(num) + 1), 6):
                    if num % i == 0 or num % (i + 2) == 0:
                        is_prime = False
                        break
            if is_prime:
                yield num
            num += 1

    # islice(start=n-1, stop=n) leaves exactly the n-th prime in the iterator.
    return next(itertools.islice(_primes(), _lowercase - 1, _lowercase))


if __name__ == "__main__":
    # The guard previously called an undefined ``solution()``.
    print(f"{lowercase_() = }")
| 357 | 0 |
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_: Optional[int] =get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class __A(UpperCamelCase__, unittest.TestCase):
    """Tokenizer tests for XLMProphetNet, driven by the shared tokenizer-tester mixin.

    NOTE(review): this block is name-mangled.  The base ``UpperCamelCase__`` (the
    tokenizer-tester mixin), the ``__a`` call arguments, and the ``UpperCAmelCase_``
    assignments — later read back through names such as ``tokenizer``,
    ``vocab_keys``, ``tokens``, ``ids`` and ``back_tokens`` — are not defined in
    this module; restore the original identifiers before running the suite.
    """

    # Mixin configuration: class under test, fast-tokenizer availability,
    # sentencepiece flag (presumably — TODO confirm against the mixin).
    a__ : Optional[Any] = XLMProphetNetTokenizer
    a__ : Union[str, Any] = False
    a__ : Tuple = True

    def _lowercase(self: Any):
        """Build a tokenizer from the SentencePiece fixture and save it for the mixin."""
        super().setUp()

        # We have a SentencePiece fixture for testing
        UpperCAmelCase_ = XLMProphetNetTokenizer(__a , keep_accents=__a )
        tokenizer.save_pretrained(self.tmpdirname )

    def _lowercase(self: str):
        """"[PAD]" should map to id 0 and back again."""
        UpperCAmelCase_ = "[PAD]"
        UpperCAmelCase_ = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )

    def _lowercase(self: Any):
        """Spot-check the first/last vocabulary entries and the vocab length."""
        UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , "[PAD]" )
        self.assertEqual(vocab_keys[1] , "[CLS]" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(__a ) , 1012 )

    def _lowercase(self: int):
        """The reported vocab size matches the fixture model."""
        self.assertEqual(self.get_tokenizer().vocab_size , 1012 )

    def _lowercase(self: int):
        """Round-trip tokenize / ids / tokens, including accents and unknown pieces."""
        UpperCAmelCase_ = XLMProphetNetTokenizer(__a , keep_accents=__a )

        UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
        self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )

        # Raw SentencePiece ids are shifted by the fairseq offset.
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )

        UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            __a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(__a )
        # -9 + fairseq_offset marks pieces that fall back to <unk>.
        self.assertListEqual(
            __a , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ] , )

        UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(__a )
        # Decoding the unknown ids yields "[UNK]" where "9" and "é" used to be.
        self.assertListEqual(
            __a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "[UNK]",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "[UNK]",
                ".",
            ] , )

    @cached_property
    def _lowercase(self: Optional[int]):
        """Hub tokenizer used by the slow integration tests (downloaded once)."""
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )

    @slow
    def _lowercase(self: List[str]):
        """Encoding "Hello World!" with the hub tokenizer yields fixed ids."""
        UpperCAmelCase_ = "Hello World!"
        UpperCAmelCase_ = [35389, 6672, 49, 2]

        self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )

    @slow
    def _lowercase(self: int):
        """Full integration check against a pinned model revision."""
        # fmt: off
        UpperCAmelCase_ = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__a , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 78 | '''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class __A :
a__ : int
a__ : TreeNode | None = None
a__ : TreeNode | None = None
# Result of distributing coins in a subtree: moves performed so far and the
# subtree's surplus/deficit of coins relative to one-coin-per-node.
# Fix: the namedtuple was previously bound only to a placeholder name while the
# solver read ``CoinsDistribResult``; the old alias is kept for compatibility.
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
SCREAMING_SNAKE_CASE_ = CoinsDistribResult


def lowerCAmelCase_(snake_case_: TreeNode | None) -> int:
    """Return the minimum number of moves to leave exactly one coin per node
    of the tree rooted at *snake_case_* (LeetCode 979, "Distribute Coins").

    A move transfers one coin between a node and an adjacent node.

    Raises:
        ValueError: if the total number of coins differs from the node count
            (no solution exists in that case).
    """
    if snake_case_ is None:
        return 0

    # Validation: a solution only exists when #coins == #nodes.
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(snake_case_) != count_coins(snake_case_):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation: post-order walk; each subtree reports the moves spent
    # inside it plus its coin excess, so the parent knows how many coins must
    # cross the connecting edge.
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            # An empty subtree needs nothing: excess 1 makes 1 - excess == 0.
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        # Coins that must flow across each child edge (sign encodes direction).
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(snake_case_)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 78 | 1 |
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _lowerCamelCase (__lowerCamelCase ):
    """Trainer specialisation for extractive question answering.

    Runs the raw prediction loop with metric computation disabled, converts the
    raw model output into answer spans via ``post_process_function``, and only
    then computes metrics.

    NOTE(review): this block is name-mangled.  The base name ``__lowerCamelCase``
    is undefined here (presumably ``Trainer`` — confirm), the ``__init__``
    signature repeats the parameter name ``lowerCamelCase_`` (a SyntaxError as
    written), and many locals are bound to ``_lowercase`` but read back under the
    intended names (``eval_dataset``, ``eval_dataloader``, ``compute_metrics``,
    ``eval_loop``, ``start_time``, ``output``, ``total_batch_size``, ``metrics``,
    ``predictions``).  Restore the original identifiers before running.
    """

    def __init__( self : str , *lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : Optional[Any]=None , **lowerCamelCase_ : Optional[Any] ):
        """Store the raw eval examples and the span post-processing callback."""
        super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
        _lowercase : Union[str, Any] = eval_examples
        _lowercase : List[Any] = post_process_function

    def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : str = "eval" ):
        """Evaluate: run the prediction loop, post-process, then compute metrics."""
        _lowercase : Dict = self.eval_dataset if eval_dataset is None else eval_dataset
        _lowercase : Tuple = self.get_eval_dataloader(lowerCamelCase_ )
        _lowercase : Optional[int] = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        _lowercase : Tuple = self.compute_metrics
        _lowercase : int = None
        _lowercase : Union[str, Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        _lowercase : List[str] = time.time()
        try:
            _lowercase : Any = eval_loop(
                lowerCamelCase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase_ , metric_key_prefix=lowerCamelCase_ , )
        finally:
            # Always restore the metric callback, even if the loop raised.
            _lowercase : Optional[int] = compute_metrics
        _lowercase : Optional[Any] = self.args.eval_batch_size * self.args.world_size
        # JIT compilation time would otherwise be billed to evaluation speed.
        if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                lowerCamelCase_ , lowerCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            _lowercase : Any = self.post_process_function(lowerCamelCase_ , lowerCamelCase_ , output.predictions )
            _lowercase : Union[str, Any] = self.compute_metrics(lowerCamelCase_ )

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F'''{metric_key_prefix}_''' ):
                    _lowercase : Optional[Any] = metrics.pop(lowerCamelCase_ )
            metrics.update(output.metrics )
        else:
            _lowercase : List[str] = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(lowerCamelCase_ )

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )

        _lowercase : List[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowerCamelCase_ )
        return metrics

    def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : str = "test" ):
        """Predict: same flow as evaluate, but returns a PredictionOutput."""
        _lowercase : Dict = self.get_test_dataloader(lowerCamelCase_ )

        # Temporarily disable metric computation, we will do it in the loop here.
        _lowercase : Dict = self.compute_metrics
        _lowercase : int = None
        _lowercase : Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        _lowercase : Tuple = time.time()
        try:
            _lowercase : Dict = eval_loop(
                lowerCamelCase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase_ , metric_key_prefix=lowerCamelCase_ , )
        finally:
            _lowercase : List[str] = compute_metrics
        _lowercase : Any = self.args.eval_batch_size * self.args.world_size
        if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                lowerCamelCase_ , lowerCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )

        # Without a post-processor or metric function the raw loop output is final.
        if self.post_process_function is None or self.compute_metrics is None:
            return output

        _lowercase : Tuple = self.post_process_function(lowerCamelCase_ , lowerCamelCase_ , output.predictions , 'predict' )
        _lowercase : Dict = self.compute_metrics(lowerCamelCase_ )

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F'''{metric_key_prefix}_''' ):
                _lowercase : List[str] = metrics.pop(lowerCamelCase_ )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowerCamelCase_ )
| 283 | """simple docstring"""
def __lowerCAmelCase( __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ):
"""simple docstring"""
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(__UpperCAmelCase ) )
def __lowerCAmelCase( __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ):
"""simple docstring"""
if index == len(__UpperCAmelCase ):
return True
# Recursive Step
for i in range(__UpperCAmelCase ):
if valid_coloring(graph[index] ,__UpperCAmelCase ,__UpperCAmelCase ):
# Color current vertex
_lowercase : str = i
# Validate coloring
if util_color(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,index + 1 ):
return True
# Backtrack
_lowercase : List[str] = -1
return False
def __lowerCAmelCase( __UpperCAmelCase ,__UpperCAmelCase ):
"""simple docstring"""
_lowercase : Any = [-1] * len(__UpperCAmelCase )
if util_color(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,0 ):
return colored_vertices
return []
| 283 | 1 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        # One shard per job when jobs == shards.
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        # More shards than jobs: earlier jobs take the larger chunks.
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def _lowercase(kwargs, expected):
    """_distribute_shards must split shard indices across jobs as documented.

    Fixes: the original repeated the parameter name ``__snake_case`` (a
    SyntaxError, and pytest requires the names to match the parametrize
    argnames) and read an undefined ``__lowercase`` in the parametrize list
    and the body.
    """
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        # Non-shard kwargs cannot be split: a single job keeps them whole.
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def _lowercase(gen_kwargs, max_num_jobs, expected):
    """_split_gen_kwargs must partition the shard list over at most max_num_jobs jobs.

    Fixes: the original repeated the parameter name ``__snake_case`` (a
    SyntaxError) and called the helper with two undefined ``__lowercase`` names.
    """
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        # Two shard lists of different lengths are ambiguous -> RuntimeError.
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def _lowercase(gen_kwargs, expected):
    """_number_of_shards_in_gen_kwargs counts shards or raises on ambiguity.

    Fixes: the original repeated the parameter name ``__snake_case`` (a
    SyntaxError) and passed undefined ``__lowercase`` names to pytest.raises
    and the helper.
    """
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
from __future__ import annotations
def a__(__lowercase: list, start: int | None = None, end: int | None = None) -> None:
    """Sort *__lowercase* in place using slowsort (deliberately inefficient,
    for teaching purposes).

    *start*/*end* bound the inclusive sub-range being sorted; both default to
    the whole list.

    Fixes: the original declared all three parameters under one duplicated name
    (a SyntaxError), assigned the defaulted bounds to placeholder names instead
    of ``start``/``end``, lost the swap targets, and recursed through an
    undefined ``slowsort``.
    """
    if start is None:
        start = 0

    if end is None:
        end = len(__lowercase) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    # Recursively sort both halves...
    a__(__lowercase, start, mid)
    a__(__lowercase, mid + 1, end)

    # ...bubble the maximum of the range to the last position...
    if __lowercase[end] < __lowercase[mid]:
        __lowercase[end], __lowercase[mid] = __lowercase[mid], __lowercase[end]

    # ...and sort everything except that maximum again.
    a__(__lowercase, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 206 | 0 |
import os
def a_() -> str:
    """Project Euler 13: return the first ten digits of the sum of the numbers
    listed one per line in ``num.txt`` (stored next to this module).

    Fixes: the original read three different undefined ``_A`` names (the module
    path, the file handle argument and the per-line value), and its
    ``Union[str, Any]`` annotation referenced a name that was never imported.
    """
    file_path = os.path.join(os.path.dirname(__file__), 'num.txt')
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    # The guard previously called an undefined ``solution()``.
    print(a_())
| 372 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE:
    """Builds tiny LiLT configs and random inputs and checks model output shapes.

    NOTE(review): this block is name-mangled.  ``__init__`` repeats the
    parameter name ``UpperCamelCase`` for every argument (a SyntaxError as
    written) and every ``snake_case__ = ...`` line rebinds one placeholder
    instead of the intended ``self.<attr>`` / local target that later lines
    read (``self.batch_size``, ``bbox``, ``input_ids`` …).  Restore the
    original identifiers before running.
    """

    def __init__(self: Any , UpperCamelCase: Tuple , UpperCamelCase: str=13 , UpperCamelCase: int=7 , UpperCamelCase: Optional[int]=True , UpperCamelCase: Tuple=True , UpperCamelCase: List[Any]=True , UpperCamelCase: Dict=True , UpperCamelCase: Any=99 , UpperCamelCase: int=24 , UpperCamelCase: List[Any]=2 , UpperCamelCase: Any=6 , UpperCamelCase: Union[str, Any]=37 , UpperCamelCase: int="gelu" , UpperCamelCase: Optional[Any]=0.1 , UpperCamelCase: Optional[Any]=0.1 , UpperCamelCase: Union[str, Any]=5_12 , UpperCamelCase: List[Any]=16 , UpperCamelCase: str=2 , UpperCamelCase: Any=0.02 , UpperCamelCase: int=3 , UpperCamelCase: str=None , UpperCamelCase: str=10_00 , ) -> Any:
        """Record the (tiny) model/test hyper-parameters used by every check."""
        snake_case__ = parent
        snake_case__ = batch_size
        snake_case__ = seq_length
        snake_case__ = is_training
        snake_case__ = use_input_mask
        snake_case__ = use_token_type_ids
        snake_case__ = use_labels
        snake_case__ = vocab_size
        snake_case__ = hidden_size
        snake_case__ = num_hidden_layers
        snake_case__ = num_attention_heads
        snake_case__ = intermediate_size
        snake_case__ = hidden_act
        snake_case__ = hidden_dropout_prob
        snake_case__ = attention_probs_dropout_prob
        snake_case__ = max_position_embeddings
        snake_case__ = type_vocab_size
        snake_case__ = type_sequence_label_size
        snake_case__ = initializer_range
        snake_case__ = num_labels
        snake_case__ = scope
        snake_case__ = range_bbox

    def lowerCAmelCase_(self: Optional[Any]) -> Dict:
        """Create random ids/bboxes/masks/labels plus a config for one test run."""
        snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        snake_case__ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal: x0 <= x1 and y0 <= y1 for every box.
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    snake_case__ = bbox[i, j, 3]
                    snake_case__ = bbox[i, j, 1]
                    snake_case__ = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    snake_case__ = bbox[i, j, 2]
                    snake_case__ = bbox[i, j, 0]
                    snake_case__ = t

        snake_case__ = None
        if self.use_input_mask:
            snake_case__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )

        snake_case__ = None
        if self.use_token_type_ids:
            snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        snake_case__ = None
        snake_case__ = None
        if self.use_labels:
            snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )

        snake_case__ = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def lowerCAmelCase_(self: int) -> Optional[Any]:
        """Build a LiltConfig from the stored tiny hyper-parameters."""
        return LiltConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )

    def lowerCAmelCase_(self: Optional[int] , UpperCamelCase: Optional[Any] , UpperCamelCase: str , UpperCamelCase: Any , UpperCamelCase: int , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: Tuple , ) -> Dict:
        """Base model: check last_hidden_state and pooler_output shapes."""
        snake_case__ = LiltModel(config=UpperCamelCase )
        model.to(UpperCamelCase )
        model.eval()
        snake_case__ = model(UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase )
        snake_case__ = model(UpperCamelCase , bbox=UpperCamelCase , token_type_ids=UpperCamelCase )
        snake_case__ = model(UpperCamelCase , bbox=UpperCamelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def lowerCAmelCase_(self: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] , UpperCamelCase: str , UpperCamelCase: Union[str, Any] , UpperCamelCase: Any , UpperCamelCase: Optional[Any] , ) -> Tuple:
        """Token classification head: logits are (batch, seq, num_labels)."""
        snake_case__ = self.num_labels
        snake_case__ = LiltForTokenClassification(config=UpperCamelCase )
        model.to(UpperCamelCase )
        model.eval()
        snake_case__ = model(
            UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def lowerCAmelCase_(self: Optional[int] , UpperCamelCase: List[str] , UpperCamelCase: Dict , UpperCamelCase: Tuple , UpperCamelCase: Any , UpperCamelCase: List[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , ) -> Any:
        """QA head: start/end logits are (batch, seq)."""
        snake_case__ = LiltForQuestionAnswering(config=UpperCamelCase )
        model.to(UpperCamelCase )
        model.eval()
        snake_case__ = model(
            UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def lowerCAmelCase_(self: List[str]) -> Optional[int]:
        """Repackage prepare_config_and_inputs() as (config, inputs_dict)."""
        snake_case__ = self.prepare_config_and_inputs()
        (
            (
                snake_case__
            ) , (
                snake_case__
            ) , (
                snake_case__
            ) , (
                snake_case__
            ) , (
                snake_case__
            ) , (
                snake_case__
            ) , (
                snake_case__
            ) ,
        ) = config_and_inputs
        snake_case__ = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE( a_ , a_ , a_ , unittest.TestCase ):
    """Common-model/pipeline test-suite for LiLT (torch only).

    NOTE(review): the three ``a_`` base names are undefined mangled
    placeholders (presumably ModelTesterMixin, GenerationTesterMixin and
    PipelineTesterMixin — confirm against the upstream test file), as are the
    ``_UpperCAmelCase`` mixin attribute names.
    """

    # Models exercised by the shared common tests.
    _UpperCAmelCase = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model mapping for the pipeline mixin.
    _UpperCAmelCase = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase = False
    _UpperCAmelCase = False

    def lowerCAmelCase_(self: Dict , UpperCamelCase: Dict , UpperCamelCase: List[str] , UpperCamelCase: Any , UpperCamelCase: Union[str, Any] , UpperCamelCase: int) -> int:
        # Skip filter for pipeline tests; always allows the test to run.
        # NOTE(review): duplicated parameter names as written — a SyntaxError.
        return True

    def lowerCAmelCase_(self: Dict) -> List[Any]:
        """Set up the model tester and a config tester for common checks."""
        snake_case__ = LiltModelTester(self )
        snake_case__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 )

    def lowerCAmelCase_(self: Optional[Any]) -> Tuple:
        """Run the standard config sanity checks."""
        self.config_tester.run_common_tests()

    def lowerCAmelCase_(self: Union[str, Any]) -> str:
        """Shape-check the base model."""
        snake_case__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase )

    def lowerCAmelCase_(self: Tuple) -> Dict:
        """Re-run the base-model check for every position-embedding type."""
        snake_case__ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            snake_case__ = type
            self.model_tester.create_and_check_model(*UpperCamelCase )

    def lowerCAmelCase_(self: int) -> Dict:
        """Shape-check the token-classification head."""
        snake_case__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCamelCase )

    def lowerCAmelCase_(self: Dict) -> int:
        """Shape-check the question-answering head."""
        snake_case__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCamelCase )

    @slow
    def lowerCAmelCase_(self: int) -> Any:
        """Smoke-test loading the first pretrained checkpoint from the hub."""
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case__ = LiltModel.from_pretrained(UpperCamelCase )
            self.assertIsNotNone(UpperCamelCase )
@require_torch
@slow
class __SCREAMING_SNAKE_CASE( unittest.TestCase ):
    """Integration test: pinned hidden-state values for the pretrained LiLT base.

    NOTE(review): the ``snake_case__`` placeholder assignments hide the intended
    locals (``model``, ``input_ids``, ``bbox``, ``outputs``, ``expected_shape``,
    ``expected_slice``) that later lines read — restore before running.
    """

    def lowerCAmelCase_(self: List[Any]) -> Dict:
        snake_case__ = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(UpperCamelCase )

        snake_case__ = torch.tensor([[1, 2]] , device=UpperCamelCase )
        snake_case__ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=UpperCamelCase )

        # forward pass
        with torch.no_grad():
            snake_case__ = model(input_ids=UpperCamelCase , bbox=UpperCamelCase )

        snake_case__ = torch.Size([1, 2, 7_68] )
        snake_case__ = torch.tensor(
            [[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=UpperCamelCase , )

        self.assertTrue(outputs.last_hidden_state.shape , UpperCamelCase )
        # Compare the first three hidden dims of both positions against the
        # pinned reference values (1e-3 tolerance).
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , UpperCamelCase , atol=1e-3 ) )
| 372 | 1 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
lowercase__ : Tuple = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE(Trainer):
    """Deprecated drop-in alias for :class:`Trainer`, kept for SageMaker users.

    Fixes: the original inherited from an undefined ``a__`` (the module imports
    ``Trainer`` for exactly this purpose), repeated the parameter name
    ``_UpperCAmelCase`` in ``__init__`` (a SyntaxError), and passed the ``args``
    value where ``warnings.warn`` expects a warning category.
    """

    def __init__(self, args=None, **kwargs):
        """Emit a deprecation warning, then defer entirely to ``Trainer``."""
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"tanreinama/GPTSAN-2.8B-spout_is_uniform": (
"https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
),
}
class __SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration for the GPTSAN-japanese model.

    Fixes: the original inherited from an undefined ``_a`` (the module imports
    ``PretrainedConfig`` for exactly this purpose), bound the three class-level
    settings to one placeholder name instead of ``model_type`` /
    ``keys_to_ignore_at_inference`` / ``attribute_map``, repeated the parameter
    name ``__lowerCAmelCase`` in ``__init__`` (a SyntaxError), and assigned
    every constructor value to a placeholder instead of ``self.<attr>``.
    Parameter names/defaults follow the upstream GPTSanJapaneseConfig.
    """

    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        # Model dimensions.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        # Layer layout: switch (MoE) layers followed by extra dense layers.
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        # Mixture-of-experts routing settings.
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
"""simple docstring"""
from PIL import Image
def lowercase(img, level) -> "Image":
    """Return a copy of *img* (a PIL image) with its brightness shifted by *level*.

    Args:
        img: source image; only its ``point`` method is used.
        level: brightness delta, must lie in [-255.0, 255.0].

    Raises:
        ValueError: if *level* is outside the allowed range.

    Fixes: the original repeated the parameter name ``_snake_case`` (a
    SyntaxError), the inner channel function read an undefined ``c`` instead of
    its parameter, and ``img.point`` was handed the wrong name instead of the
    channel function.  The return annotation is a string so importing this
    module does not require PIL's ``Image`` name to be resolvable here.
    """

    def brightness(c: int) -> float:
        # Shift every channel value by `level` (identity when level == 0).
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        _A = lowercase(img, 100)
    _A.save("image_data/lena_brightness.png", format="png")
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
def __lt__( self : List[str] , A_ : Optional[int] )-> List[str]:
return self[-1] < other[-1]
def __eq__( self : Tuple , A_ : List[Any] )-> Optional[int]:
return self[-1] == other[-1]
def lowercase(_snake_case) -> list:
    """Sort *_snake_case* in place with patience sort and return it.

    Cards are dealt onto piles (each pile's top is its smallest remaining
    element for bisection purposes); reversing every pile yields ascending
    runs, which a heap-based k-way merge combines.

    Fixes: the original read undefined names (``Stack``, ``stacks``,
    ``collection``), reversed the whole input instead of each pile, and
    dropped the ``collection[:] = ...`` write-back.  The pile class is nested
    here so the function is self-contained.
    """

    @total_ordering
    class _Pile(list):
        # Piles are ordered by their top (last) card for bisect placement.
        def __lt__(self, other):
            return self[-1] < other[-1]

        def __eq__(self, other):
            return self[-1] == other[-1]

    piles: list = []
    # sort into stacks: put each element on the leftmost pile whose top >= it.
    for element in _snake_case:
        new_pile = _Pile([element])
        i = bisect_left(piles, new_pile)
        if i != len(piles):
            piles[i].append(element)
        else:
            piles.append(new_pile)

    # use a heap-based merge to merge stack efficiently
    _snake_case[:] = merge(*(reversed(pile) for pile in piles))
    return _snake_case


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(lowercase(unsorted))
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
__A = getLogger(__name__)
def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] = 8 , UpperCamelCase__ : str = 1024 , UpperCamelCase__ : List[str]="val" , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : List[Any]="summarization" , UpperCamelCase__ : str=None , UpperCamelCase__ : List[Any]=1 , UpperCamelCase__ : List[Any] = None , UpperCamelCase__ : Optional[int]="" , **UpperCamelCase__ : Any , ) -> List[Any]:
    """Per-rank worker for distributed seq2seq generation.

    Joins a NCCL process group, loads the model on this rank's GPU, generates
    predictions for this rank's slice of the dataset, and writes them to
    ``rank_<local_rank>_output.json`` inside the save dir.  Returns the local
    results plus the sampler's replica count so the caller can gather.

    NOTE(review): this block is name-mangled.  The signature repeats the
    parameter name ``UpperCamelCase__`` (a SyntaxError as written) and every
    ``__lowerCamelCase = ...`` line rebinds a placeholder instead of the
    intended local (``local_rank``, ``save_dir``, ``model``, ``tokenizer``,
    ``ds``, ``sampler``, ``data_loader``, ``results`` …) that later lines read.
    Restore the original identifiers before running.
    """
    __lowerCamelCase = str(UpperCAmelCase__ )
    assert local_rank is not None
    torch.distributed.init_process_group(backend='nccl' , rank=UpperCAmelCase__ )

    __lowerCamelCase = Path(UpperCAmelCase__ )
    # Each rank writes its own shard of predictions.
    __lowerCamelCase = save_dir.joinpath(F"""rank_{local_rank}_output.json""" )
    torch.cuda.set_device(UpperCAmelCase__ )
    __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase__ ).cuda()
    if fpaa:
        __lowerCamelCase = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(UpperCAmelCase__ , UpperCAmelCase__ )  # update config with task specific params
    __lowerCamelCase = generate_kwargs.pop('num_beams' , model.config.num_beams )  # AttributeError risk?
    if num_return_sequences > num_beams:
        __lowerCamelCase = num_return_sequences
    __lowerCamelCase = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
    logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" )  # if this is wrong, check config.model_type.

    if max_source_length is None:
        __lowerCamelCase = tokenizer.model_max_length
    if prefix is None:
        __lowerCamelCase = prefix or getattr(model.config , 'prefix' , '' ) or ''
    __lowerCamelCase = SeqaSeqDataset(
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , max_target_length=1024 , type_path=UpperCAmelCase__ , n_obs=UpperCAmelCase__ , prefix=UpperCAmelCase__ , **UpperCAmelCase__ , )

    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    __lowerCamelCase = ds.make_sortish_sampler(UpperCAmelCase__ , distributed=UpperCAmelCase__ , add_extra_examples=UpperCAmelCase__ , shuffle=UpperCAmelCase__ )
    __lowerCamelCase = DataLoader(UpperCAmelCase__ , sampler=UpperCAmelCase__ , batch_size=UpperCAmelCase__ , collate_fn=ds.collate_fn )
    __lowerCamelCase = []
    for batch in tqdm(UpperCAmelCase__ ):
        __lowerCamelCase = model.generate(
            input_ids=batch['input_ids'].to(model.device ) , attention_mask=batch['attention_mask'].to(model.device ) , num_return_sequences=UpperCAmelCase__ , num_beams=UpperCAmelCase__ , **UpperCAmelCase__ , )
        __lowerCamelCase = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ )
        __lowerCamelCase = batch['ids']
        if num_return_sequences > 1:
            __lowerCamelCase = chunks(UpperCAmelCase__ , UpperCAmelCase__ )  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(UpperCAmelCase__ ):
            results.append({'pred': pred, 'id': ids[i].item()} )
    save_json(UpperCAmelCase__ , UpperCAmelCase__ )
    return results, sampler.num_replicas
def lowerCamelCase_ ( ) -> Union[str, Any]:
    """Entry point: distributed generation/evaluation over a dataset split.

    Parses CLI args (unknown args are forwarded to ``model.generate``), runs
    ``eval_data_dir`` on this rank, then on rank 0 gathers the per-rank JSON
    result files, computes BLEU/ROUGE, and writes generations + metrics under
    ``save_dir``.

    NOTE(review): identifiers in this function are machine-mangled — every
    assignment target was renamed to ``__lowerCamelCase`` and many values were
    replaced by ``UpperCAmelCase__`` — so later references (``parser``,
    ``args``, ``generate_kwargs``, ``json_save_dir``, ``save_dir``, ...) are
    free/undefined names here. Compare against the original
    run-distributed-eval script before trusting behavior.
    """
    __lowerCamelCase = argparse.ArgumentParser(
        epilog='Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate' )
    parser.add_argument('--data_dir' , type=UpperCAmelCase__ , help='like cnn_dm/test.source' )
    parser.add_argument(
        '--model_name' , type=UpperCAmelCase__ , help='like facebook/bart-large-cnn,t5-base, etc.' , default='sshleifer/distilbart-xsum-12-3' , )
    parser.add_argument('--save_dir' , type=UpperCAmelCase__ , help='where to save' , default='tmp_gen' )
    parser.add_argument('--max_source_length' , type=UpperCAmelCase__ , default=UpperCAmelCase__ )
    parser.add_argument(
        '--type_path' , type=UpperCAmelCase__ , default='test' , help='which subset to evaluate typically train/val/test' )
    parser.add_argument('--task' , type=UpperCAmelCase__ , default='summarization' , help='used for task_specific_params + metrics' )
    parser.add_argument('--bs' , type=UpperCAmelCase__ , default=8 , required=UpperCAmelCase__ , help='batch size' )
    parser.add_argument(
        '--local_rank' , type=UpperCAmelCase__ , default=-1 , required=UpperCAmelCase__ , help='should be passed by distributed.launch' )
    parser.add_argument(
        '--n_obs' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , required=UpperCAmelCase__ , help='How many observations. Defaults to all.' )
    parser.add_argument(
        '--num_return_sequences' , type=UpperCAmelCase__ , default=1 , required=UpperCAmelCase__ , help='How many sequences to return' )
    parser.add_argument(
        '--sync_timeout' , type=UpperCAmelCase__ , default=600 , required=UpperCAmelCase__ , help='How long should master process wait for other processes to finish.' , )
    parser.add_argument('--src_lang' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , required=UpperCAmelCase__ )
    parser.add_argument('--tgt_lang' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , required=UpperCAmelCase__ )
    parser.add_argument(
        '--prefix' , type=UpperCAmelCase__ , required=UpperCAmelCase__ , default=UpperCAmelCase__ , help='will be added to the begininng of src examples' )
    parser.add_argument('--fp16' , action='store_true' )
    parser.add_argument('--debug' , action='store_true' )
    # Record wall-clock start so per-sample runtime can be reported later.
    __lowerCamelCase = time.time()
    __lowerCamelCase , __lowerCamelCase = parser.parse_known_args()
    # Unknown CLI flags become generate() kwargs (e.g. --num_beams=2).
    __lowerCamelCase = parse_numeric_n_bool_cl_kwargs(UpperCAmelCase__ )
    if generate_kwargs and args.local_rank <= 0:
        print(F"""parsed the following generate kwargs: {generate_kwargs}""" )
    # Workers write intermediate per-rank JSON into <save_dir>_tmp.
    __lowerCamelCase = Path(args.save_dir + '_tmp' )
    Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ ) # this handles locking.
    __lowerCamelCase = list(json_save_dir.glob('rank_*.json' ) )
    if intermediate_files:
        raise ValueError(F"""Found files at {json_save_dir} please move or remove them.""" )
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    __lowerCamelCase = {}
    if args.src_lang is not None:
        __lowerCamelCase = args.src_lang
    if args.tgt_lang is not None:
        __lowerCamelCase = args.tgt_lang
    Path(args.save_dir ).mkdir(exist_ok=UpperCAmelCase__ )
    # NOTE(review): ``fpaa=args.fpaa`` looks like mangled ``fp16=args.fp16``
    # (the parser above defines --fp16, not --fpaa) — confirm before running.
    __lowerCamelCase , __lowerCamelCase = eval_data_dir(
        args.data_dir , UpperCAmelCase__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=UpperCAmelCase__ , **UpperCAmelCase__ , )
    # Only rank 0 aggregates results, computes metrics, and cleans up.
    if args.local_rank <= 0:
        __lowerCamelCase = Path(args.save_dir )
        save_dir.mkdir(exist_ok=UpperCAmelCase__ )
        __lowerCamelCase = gather_results_from_each_node(UpperCAmelCase__ , UpperCAmelCase__ , args.sync_timeout )
        __lowerCamelCase = combine_partial_results(UpperCAmelCase__ )
        if args.num_return_sequences > 1:
            # Pseudolabeling mode: dump all sampled sequences, skip metrics.
            __lowerCamelCase = save_dir.joinpath('pseudolabel_results.json' )
            print(F"""Saving aggregated results at {save_path}, intermediate in {json_save_dir}/""" )
            save_json(UpperCAmelCase__ , UpperCAmelCase__ )
            return
        __lowerCamelCase = Path(args.data_dir ).joinpath(args.type_path + '.target' )
        with open(UpperCAmelCase__ ) as f:
            __lowerCamelCase = [x.rstrip() for x in f.readlines()][: len(UpperCAmelCase__ )]
        # Calculate metrics, save metrics, and save _generations.txt
        __lowerCamelCase = 'translation' in args.task
        __lowerCamelCase = calculate_bleu if calc_bleu else calculate_rouge
        __lowerCamelCase = 'bleu' if calc_bleu else 'rouge'
        __lowerCamelCase = score_fn(UpperCAmelCase__ , UpperCAmelCase__ )
        __lowerCamelCase = len(UpperCAmelCase__ )
        __lowerCamelCase = time.time() - start_time
        __lowerCamelCase = round(runtime / metrics['n_obs'] , 4 )
        __lowerCamelCase = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        __lowerCamelCase = save_dir.joinpath(F"""{args.type_path}_{metric_name}.json""" )
        save_json(UpperCAmelCase__ , UpperCAmelCase__ , indent=UpperCAmelCase__ )
        print(UpperCAmelCase__ )
        write_txt_file(UpperCAmelCase__ , save_dir.joinpath(F"""{args.type_path}_generations.txt""" ) )
        if args.debug:
            write_txt_file(UpperCAmelCase__ , save_dir.joinpath(F"""{args.type_path}.target""" ) )
        else:
            # Remove the per-rank intermediate directory on success.
            shutil.rmtree(UpperCAmelCase__ )
def lowerCamelCase_ ( UpperCamelCase__ : Any ) -> Union[str, Any]:
    """Merge per-rank result lists into one prediction list ordered by id.

    Args:
        UpperCamelCase__: iterable of per-rank result lists; each element is a
            list of ``{'pred': str, 'id': int}`` records as written by the
            evaluation workers.

    Returns:
        The predictions, sorted by their ``'id'`` so they line up with the
        original dataset order.
    """
    records = []
    for partial_result in UpperCamelCase__:
        # Bug fix: the loop previously extended with an undefined name instead
        # of the per-rank list being iterated.
        records.extend(partial_result)
    # Bug fix: the sort key lambda declared a parameter it never used and
    # referenced a free variable ``x``; bind ``x`` properly.
    records = sorted(records, key=lambda x: x["id"])
    return [x["pred"] for x in records]
def lowerCamelCase_ ( num_replicas : int , save_dir , timeout : int ) -> List[Any]:
    """Rank-0 helper: wait until every worker has written its ``rank_*.json``.

    Polls ``save_dir`` until ``num_replicas`` result files exist and all of
    them parse as JSON (a worker may still be mid-write), then returns the
    loaded per-rank result lists.

    Bug fix: the original signature declared three parameters with the same
    mangled name (a SyntaxError); the names below are restored from the body's
    references and from the call site
    (``gather_results_from_each_node(num_replicas, json_save_dir, sync_timeout)``).

    Args:
        num_replicas: number of worker files to wait for.
        save_dir: ``pathlib.Path`` directory the workers write into.
        timeout: seconds to wait before giving up.

    Returns:
        List of parsed per-rank JSON payloads.

    Raises:
        TimeoutError: if the files do not all appear/parse within ``timeout``.
    """
    start_wait = time.time()
    logger.info('waiting for all nodes to finish' )
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob('rank_*.json' ) )
        if len(json_files ) < num_replicas:
            # Not all workers have saved yet; keep polling.
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json , json_files )
            return json_data
        except JSONDecodeError:
            # A file is still being written; retry on the next poll.
            continue
    else:
        # ``while``'s else fires when the condition goes false without a
        # break, i.e. on timeout.
        raise TimeoutError('Rank 0 gave up on waiting for other processes' )
    # Unreachable
# Script entry point.
# NOTE(review): ``run_generate`` is not defined above under that name — the
# entry function appears to have been renamed to ``lowerCamelCase_`` by the
# same mangling seen elsewhere in this file; confirm before running.
if __name__ == "__main__":
    # Usage for MT:
    run_generate()
| 469 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class __lowercase :
    """Stub "model" whose forward-like method accepts three contiguous
    positional arguments and returns ``None``.

    Used by the ``ensure_valid_input`` tests below (invoked there under the
    mangled-away name ``FuncContiguousArgs``) — that helper inspects the
    method's parameter names, so they must be distinct and meaningful.

    Bug fix: the original declared all three parameters with the same name
    ``A_``, which is a SyntaxError. The restored names match the ordering the
    reordering test asserts: (input_ids, token_type_ids, attention_mask) —
    NOTE(review): confirm against the upstream convert_graph_to_onnx tests.
    """

    def __magic_name__ ( self , input_ids , token_type_ids , attention_mask )-> Union[str, Any]:
        return None
class __lowercase :
    """Stub "model" whose forward-like method interleaves a non-generated
    parameter among the generated ones and returns ``None``.

    Used by the ``ensure_valid_input`` tests below (invoked there under the
    mangled-away name ``FuncNonContiguousArgs``): the extra second parameter
    is never supplied, so only ``input_ids`` survives argument filtering.

    Bug fix: the original declared all four parameters with the same name
    ``A_``, which is a SyntaxError. The restored names reflect that role —
    NOTE(review): confirm against the upstream convert_graph_to_onnx tests.
    """

    def __magic_name__ ( self , input_ids , some_other_args , token_type_ids , attention_mask )-> int:
        return None
class __lowercase ( unittest.TestCase ):
    """Tests for ``transformers.convert_graph_to_onnx``: export, quantization,
    dynamic-axis inference, and input validation/reordering.

    NOTE(review): this class is machine-mangled — every test method is named
    ``__magic_name__`` (so later defs shadow earlier ones and none are
    collected as ``test_*``), locals are collapsed to ``_SCREAMING_SNAKE_CASE``
    leaving later references (``model_kwargs``, ``path``, ``tokens``, ...)
    undefined, several signatures repeat the parameter name ``A_`` (a
    SyntaxError), and ``OnnxExportTestCase.MODEL_TO_TEST`` refers to this
    class's original, now-lost name. Restore from the upstream test file
    before running.
    """
    # (model_name, model_kwargs) pairs exercised by the export tests.
    SCREAMING_SNAKE_CASE : Dict = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore
    ]
    @require_tf
    @slow
    def __magic_name__ ( self )-> str:
        # Export every listed model through the TensorFlow path (opset 12).
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(A_ , 'tf' , 12 , **A_ )
    @require_torch
    @slow
    def __magic_name__ ( self )-> Union[str, Any]:
        # Export every listed model through the PyTorch path (opset 12).
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(A_ , 'pt' , 12 , **A_ )
    @require_torch
    @slow
    def __magic_name__ ( self )-> List[str]:
        # Export a locally-saved tiny BERT built from a scratch vocab file.
        from transformers import BertModel
        _SCREAMING_SNAKE_CASE = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
        with NamedTemporaryFile(mode='w+t' ) as vocab_file:
            vocab_file.write('\n'.join(A_ ) )
            vocab_file.flush()
            _SCREAMING_SNAKE_CASE = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            _SCREAMING_SNAKE_CASE = BertModel(BertConfig(vocab_size=len(A_ ) ) )
            model.save_pretrained(A_ )
            self._test_export(A_ , 'pt' , 12 , A_ )
    @require_tf
    @slow
    def __magic_name__ ( self )-> Dict:
        # TF export + quantization; quantized artifact must not grow.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            _SCREAMING_SNAKE_CASE = self._test_export(A_ , 'tf' , 12 , **A_ )
            _SCREAMING_SNAKE_CASE = quantize(Path(A_ ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(A_ ).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model' )
    @require_torch
    @slow
    def __magic_name__ ( self )-> List[str]:
        # PyTorch export + quantization; quantized artifact must not grow.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            _SCREAMING_SNAKE_CASE = self._test_export(A_ , 'pt' , 12 , **A_ )
            _SCREAMING_SNAKE_CASE = quantize(A_ )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(A_ ).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model' )
    def __magic_name__ ( self , A_ , A_ , A_ , A_=None , **A_ )-> Any:
        # Shared export helper: converts into a temp path and returns it;
        # any failure is turned into a test failure.
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                _SCREAMING_SNAKE_CASE = Path(A_ ).joinpath('model.onnx' )
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(A_ , A_ , A_ , A_ , A_ , **A_ )
            return path
        except Exception as e:
            self.fail(A_ )
    @require_torch
    @require_tokenizers
    @slow
    def __magic_name__ ( self )-> List[str]:
        # Dynamic-axis inference for a tiny random PyTorch BERT.
        from transformers import BertModel
        _SCREAMING_SNAKE_CASE = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
        _SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
        self._test_infer_dynamic_axis(A_ , A_ , 'pt' )
    @require_tf
    @require_tokenizers
    @slow
    def __magic_name__ ( self )-> Optional[int]:
        # Dynamic-axis inference for a tiny random TensorFlow BERT.
        from transformers import TFBertModel
        _SCREAMING_SNAKE_CASE = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
        _SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
        self._test_infer_dynamic_axis(A_ , A_ , 'tf' )
    def __magic_name__ ( self , A_ , A_ , A_ )-> List[str]:
        # Shared helper: infer_shapes must report batch/sequence axes for all
        # inputs and the expected axes for both outputs.
        _SCREAMING_SNAKE_CASE = FeatureExtractionPipeline(A_ , A_ )
        _SCREAMING_SNAKE_CASE = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = infer_shapes(A_ , A_ )
        # Assert all variables are present
        self.assertEqual(len(A_ ) , len(A_ ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , A_ )
        self.assertSequenceEqual(variable_names[3:] , A_ )
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'} )
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'} )
        self.assertDictEqual(shapes['output_1'] , {0: 'batch'} )
    def __magic_name__ ( self )-> Dict:
        # ensure_valid_input: reorders args to match the forward signature and
        # drops everything after the first unsupplied parameter.
        _SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask', 'token_type_ids']
        _SCREAMING_SNAKE_CASE = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = ensure_valid_input(FuncContiguousArgs() , A_ , A_ )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(A_ ) , 3 )
        # Should have exactly the same input names
        self.assertEqual(set(A_ ) , set(A_ ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(A_ , (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = ensure_valid_input(FuncNonContiguousArgs() , A_ , A_ )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(A_ ) , 1 )
        self.assertEqual(len(A_ ) , 1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] , tokens['input_ids'] )
        self.assertEqual(ordered_input_names[0] , 'input_ids' )
    def __magic_name__ ( self )-> Optional[Any]:
        # generate_identified_filename appends the suffix before the extension.
        _SCREAMING_SNAKE_CASE = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) , '-test' )
        self.assertEqual('/home/something/my_fake_model-test.onnx' , generated.as_posix() )
| 605 | 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level constants for the Speech2Text tokenizer.
# NOTE(review): every constant below is bound to the same mangled name ``a``,
# so each assignment overwrites the previous one and only the last value
# survives. The class body and the last line reference the original names
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, MAX_MODEL_INPUT_SIZES,
# MUSTC_LANGS, LANGUAGES, ...), which are therefore undefined as written —
# restore the original names before use.
a= logging.get_logger(__name__)
# SentencePiece word-boundary marker.
a= '''▁'''
# File names expected inside a saved tokenizer directory.
a= {
    '''vocab_file''': '''vocab.json''',
    '''spm_file''': '''sentencepiece.bpe.model''',
}
# Download locations of the pretrained vocab / SentencePiece model files.
a= {
    '''vocab_file''': {
        '''facebook/s2t-small-librispeech-asr''': (
            '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
        ),
    },
    '''spm_file''': {
        '''facebook/s2t-small-librispeech-asr''': (
            '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
        )
    },
}
# Maximum input length per pretrained checkpoint.
a= {
    '''facebook/s2t-small-librispeech-asr''': 1_0_2_4,
}
# MuST-C target-language codes.
a= ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
a= {'''mustc''': MUSTC_LANGS}
class __lowercase ( _lowerCamelCase ):
    """Speech2Text tokenizer: a JSON vocab for id lookup plus a SentencePiece
    model for (de)tokenization, with optional per-language target tokens.

    NOTE(review): this class is machine-mangled — ``__init__`` repeats the
    parameter name ``_lowerCamelCase`` (a SyntaxError), most methods share the
    name ``lowerCAmelCase`` (later defs shadow earlier ones, and
    ``@tgt_lang.setter`` references a property name that no longer exists),
    and locals are collapsed to ``__UpperCamelCase`` leaving later references
    (``self.encoder``, ``tokens``, ``vocab``, ``state``, ...) undefined.
    Restore from the upstream Speech2TextTokenizer before use.
    """
    # Class-level tokenizer metadata; the referenced module constants were
    # mangled away (see the constants block above).
    SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ = MAX_MODEL_INPUT_SIZES
    SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask''']
    SCREAMING_SNAKE_CASE__ = []
    def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<unk>" , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase = None , **_lowerCamelCase , ):
        # Load vocab + SentencePiece model; when lang_codes is given, build
        # <lang:xx> control tokens and select the default target language.
        __UpperCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , do_upper_case=_lowerCamelCase , do_lower_case=_lowerCamelCase , tgt_lang=_lowerCamelCase , lang_codes=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
        __UpperCamelCase : Union[str, Any] = do_upper_case
        __UpperCamelCase : Dict = do_lower_case
        __UpperCamelCase : List[str] = load_json(_lowerCamelCase )
        __UpperCamelCase : List[Any] = {v: k for k, v in self.encoder.items()}
        __UpperCamelCase : int = spm_file
        __UpperCamelCase : List[Any] = load_spm(_lowerCamelCase , self.sp_model_kwargs )
        if lang_codes is not None:
            __UpperCamelCase : Any = lang_codes
            __UpperCamelCase : Any = LANGUAGES[lang_codes]
            __UpperCamelCase : str = [f"""<lang:{lang}>""" for lang in self.langs]
            __UpperCamelCase : List[str] = {lang: self.sp_model.PieceToId(f"""<lang:{lang}>""" ) for lang in self.langs}
            __UpperCamelCase : str = self.lang_tokens
            __UpperCamelCase : str = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            __UpperCamelCase : Dict = {}
    @property
    def lowerCAmelCase ( self ):
        # Vocabulary size (originally ``vocab_size``).
        return len(self.encoder )
    @property
    def lowerCAmelCase ( self ):
        # Current target language (originally the ``tgt_lang`` property).
        return self._tgt_lang
    @tgt_lang.setter
    def lowerCAmelCase ( self , _lowerCamelCase ):
        # Setter: switch target language and refresh the prefix tokens.
        __UpperCamelCase : Optional[int] = new_tgt_lang
        self.set_tgt_lang_special_tokens(_lowerCamelCase )
    def lowerCAmelCase ( self , _lowerCamelCase ):
        # Prefix every sequence with the target language's control token.
        __UpperCamelCase : int = self.lang_code_to_id[tgt_lang]
        __UpperCamelCase : List[str] = [lang_code_id]
    def lowerCAmelCase ( self , _lowerCamelCase ):
        # Tokenize text into SentencePiece pieces.
        return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
    def lowerCAmelCase ( self , _lowerCamelCase ):
        # Token -> id, falling back to the unk token's id.
        return self.encoder.get(_lowerCamelCase , self.encoder[self.unk_token] )
    def lowerCAmelCase ( self , _lowerCamelCase ):
        # Id -> token, falling back to the unk token.
        return self.decoder.get(_lowerCamelCase , self.unk_token )
    def lowerCAmelCase ( self , _lowerCamelCase ):
        # Convert a token sequence back to a string, decoding runs of regular
        # tokens with SentencePiece and passing special tokens through.
        __UpperCamelCase : Optional[Any] = []
        __UpperCamelCase : List[str] = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                __UpperCamelCase : int = self.sp_model.decode(_lowerCamelCase )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                __UpperCamelCase : int = []
            else:
                current_sub_tokens.append(_lowerCamelCase )
        __UpperCamelCase : str = self.sp_model.decode(_lowerCamelCase )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=None ):
        # Build model inputs: prefix tokens + ids (+ pair ids) + EOS.
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
    def lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ):
        # 1 for special tokens (prefix and trailing EOS), 0 for sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
        __UpperCamelCase : Union[str, Any] = [1] * len(self.prefix_tokens )
        __UpperCamelCase : Any = [1]
        if token_ids_a is None:
            return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones
        return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones
    def lowerCAmelCase ( self ):
        # Full vocab including tokens added after loading.
        __UpperCamelCase : Union[str, Any] = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        # Drop the (unpicklable) SentencePiece processor when pickling.
        __UpperCamelCase : int = self.__dict__.copy()
        __UpperCamelCase : Dict = None
        return state
    def __setstate__( self , _lowerCamelCase ):
        # Restore state and reload the SentencePiece model from disk.
        __UpperCamelCase : List[str] = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            __UpperCamelCase : Optional[int] = {}
        __UpperCamelCase : Union[str, Any] = load_spm(self.spm_file , self.sp_model_kwargs )
    def lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ):
        # Save the JSON vocab and the SentencePiece model into a directory
        # (copying the spm file, or serializing it if the source is gone).
        __UpperCamelCase : List[str] = Path(_lowerCamelCase )
        assert save_dir.is_dir(), f"""{save_directory} should be a directory"""
        __UpperCamelCase : Optional[int] = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        __UpperCamelCase : Union[str, Any] = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )
        save_json(self.encoder , _lowerCamelCase )
        if os.path.abspath(self.spm_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , _lowerCamelCase )
        elif not os.path.isfile(self.spm_file ):
            with open(_lowerCamelCase , 'wb' ) as fi:
                __UpperCamelCase : Dict = self.sp_model.serialized_model_proto()
                fi.write(_lowerCamelCase )
        return (str(_lowerCamelCase ), str(_lowerCamelCase ))
def _UpperCamelCase ( path : str , sp_model_kwargs : Dict[str, Any] ):
    """Load a SentencePiece processor from ``path``.

    Bug fixes: the original declared both parameters with the same name ``_a``
    (a SyntaxError) and bound the processor to a placeholder name while the
    ``Load`` call referenced ``spm``.

    Args:
        path: filesystem path of the serialized SentencePiece model.
        sp_model_kwargs: keyword arguments forwarded to
            ``sentencepiece.SentencePieceProcessor``.

    Returns:
        The loaded ``SentencePieceProcessor``.
    """
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def _UpperCamelCase ( _a : str ):
    """Read the JSON file at path ``_a`` and return the parsed object.

    Bug fix: the original passed the *path string* to ``json.load`` instead of
    the open file handle, which raised on every call.
    """
    with open(_a , 'r' ) as f:
        return json.load(f )
def _UpperCamelCase ( content : Any , path : str ):
    """Serialize ``content`` as indented JSON to the file at ``path``.

    Bug fixes: the original declared both parameters with the same name ``_a``
    (a SyntaxError) and passed the same argument twice to ``json.dump``
    instead of (object, file handle).
    """
    with open(path , 'w' ) as f:
        json.dump(content , f , indent=2 )
| 287 | '''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _UpperCamelCase ( coefficient_matrix : NDArray[floataa] , constant_matrix : NDArray[floataa] , init_val : list[int] , iterations : int , ):
    """Approximate the solution of ``A x = b`` with the Jacobi iteration method.

    Bug fixes vs. the mangled original: all four parameters shared the name
    ``_a`` (a SyntaxError), intermediate names were collapsed so validation
    messages and updates referenced undefined variables, the dominance check
    called a helper whose name was mangled away (inlined below), and the final
    conversion read ``float(<parameter>)`` instead of ``float(i)``.

    Args:
        coefficient_matrix: square ``n x n`` matrix ``A``.
        constant_matrix: ``n x 1`` column vector ``b``.
        init_val: initial guess for the ``n`` unknowns.
        iterations: number of Jacobi sweeps to perform (must be >= 1).

    Returns:
        list[float]: the approximation after ``iterations`` sweeps.

    Raises:
        ValueError: on dimension mismatches, a non-positive iteration count,
            or a coefficient matrix that is not strictly diagonally dominant.
    """

    def _strictly_diagonally_dominant(table: NDArray[floataa]) -> bool:
        # Inlined validity check on the augmented [A | b] table; the last
        # column (the constants) is ignored. Mirrors the module-level helper.
        n_rows, n_cols = table.shape
        for i in range(n_rows):
            total = 0
            for j in range(n_cols - 1):
                if i != j:
                    total += table[i][j]
            if table[i][i] <= total:
                raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
        return True

    rows_a, cols_a = coefficient_matrix.shape
    rows_b, cols_b = constant_matrix.shape
    if rows_a != cols_a:
        raise ValueError(
            f"""Coefficient matrix dimensions must be nxn but received {rows_a}x{cols_a}""" )
    if cols_b != 1:
        raise ValueError(f"""Constant matrix must be nx1 but received {rows_b}x{cols_b}""" )
    if rows_a != rows_b:
        raise ValueError(
            'Coefficient and constant matrices dimensions must be nxn and nx1 but '
            f"""received {rows_a}x{cols_a} and {rows_b}x{cols_b}""" )
    if len(init_val ) != rows_a:
        raise ValueError(
            'Number of initial values must be equal to number of rows in coefficient '
            f"""matrix but received {len(init_val )} and {rows_a}""" )
    if iterations <= 0:
        raise ValueError('Iterations must be at least 1' )

    # Work on the augmented table [A | b].
    table: NDArray[floataa] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows, cols = table.shape
    _strictly_diagonally_dominant(table )
    new_val = list(init_val )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]          # diagonal entry a_ii
                elif col == cols - 1:
                    val = table[row][col]            # constant b_i
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            # x_i <- (b_i - sum_{j != i} a_ij * x_j) / a_ii
            new_val.append((temp + val) / denom )
        init_val = new_val
    return [float(i ) for i in new_val]
def _UpperCamelCase ( _a : NDArray[floataa] ):
    """Validate that the augmented table ``[A | b]`` in ``_a`` is strictly
    diagonally dominant (last column, the constants, is ignored).

    Bug fix: the body referenced an undefined name ``table`` instead of the
    parameter ``_a``.

    NOTE(review): like the original, this sums the raw off-diagonal entries
    rather than their absolute values — confirm that is intended for matrices
    with negative coefficients.

    Returns:
        bool: ``True`` when every diagonal entry strictly exceeds the sum of
        the other entries in its row.

    Raises:
        ValueError: if the strict-dominance condition fails for any row.
    """
    rows, cols = _a.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += _a[i][j]
        if _a[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
    return is_diagonally_dominant
# Test Cases
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 287 | 1 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class a_ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Model tests for ``FlaxAutoencoderKL`` via the common Flax model tester.

    NOTE(review): mangling artifacts remain — the first base class name was
    replaced by a placeholder (originally the Flax model-tester mixin), both
    methods are named ``A_`` so the second definition shadows the first
    (originally a ``dummy_input`` property and a separate method), and
    ``self.dummy_input`` below therefore has no matching attribute. Restore
    the original member names before running.
    """
    A = FlaxAutoencoderKL

    @property
    def A_( self ) -> Optional[Any]:
        """Build the dummy input dict: a random image batch plus a PRNG key.

        Bug fix: every local was collapsed to one placeholder name, so the
        later references (batch_size, num_channels, sizes, prng_key, image)
        were undefined; names are restored from those uses.
        """
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
        return {"sample": image, "prng_key": prng_key}

    def A_( self ) -> Tuple:
        """Return (init_dict, inputs_dict) for the common model tester."""
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 205 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class a_ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Fast tests for ``ShapEPipeline`` built from tiny dummy components.

    NOTE(review): heavily mangled — the first base name was replaced by a
    placeholder (originally the pipeline tester mixin), every class attribute
    is bound to ``A`` (each assignment overwrites the previous, so only the
    last survives), all methods are named ``A_`` (later defs shadow earlier
    ones; none are collected as tests), one signature repeats the parameter
    name ``SCREAMING_SNAKE_CASE`` (a SyntaxError), and locals collapsed to
    ``SCREAMING_SNAKE_CASE_`` leave later references (``tokenizer``, ``pipe``,
    ``inputs``, ...) undefined. Restore from the upstream Shap-E test file.
    """
    A = ShapEPipeline
    A = ['''prompt''']
    A = ['''prompt''']
    A = [
        '''num_images_per_prompt''',
        '''num_inference_steps''',
        '''generator''',
        '''latents''',
        '''guidance_scale''',
        '''frame_size''',
        '''output_type''',
        '''return_dict''',
    ]
    A = False
    @property
    def A_( self ) -> Optional[Any]:
        """Text-embedder hidden size used by the dummy components."""
        return 32
    @property
    def A_( self ) -> Dict:
        """Time-input dimension used by the dummy components."""
        return 32
    @property
    def A_( self ) -> str:
        """Time-embedding dimension (4x the time-input dimension)."""
        return self.time_input_dim * 4
    @property
    def A_( self ) -> str:
        """Renderer hidden dimension used by the dummy renderer."""
        return 8
    @property
    def A_( self ) -> Optional[Any]:
        """Tiny CLIP tokenizer for fast tests."""
        SCREAMING_SNAKE_CASE_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer
    @property
    def A_( self ) -> Optional[Any]:
        """Tiny deterministic CLIP text encoder (seeded for reproducibility)."""
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE )
    @property
    def A_( self ) -> List[Any]:
        """Tiny deterministic prior transformer."""
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ = {
            'num_attention_heads': 2,
            'attention_head_dim': 16,
            'embedding_dim': self.time_input_dim,
            'num_embeddings': 32,
            'embedding_proj_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'num_layers': 1,
            'clip_embed_dim': self.time_input_dim * 2,
            'additional_embeddings': 0,
            'time_embed_act_fn': 'gelu',
            'norm_in_type': 'layer',
            'encoder_hid_proj_type': None,
            'added_emb_type': None,
        }
        SCREAMING_SNAKE_CASE_ = PriorTransformer(**SCREAMING_SNAKE_CASE )
        return model
    @property
    def A_( self ) -> Union[str, Any]:
        """Tiny deterministic Shap-E renderer."""
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        SCREAMING_SNAKE_CASE_ = ShapERenderer(**SCREAMING_SNAKE_CASE )
        return model
    def A_( self ) -> int:
        """Assemble the full dummy component dict for the pipeline."""
        SCREAMING_SNAKE_CASE_ = self.dummy_prior
        SCREAMING_SNAKE_CASE_ = self.dummy_text_encoder
        SCREAMING_SNAKE_CASE_ = self.dummy_tokenizer
        SCREAMING_SNAKE_CASE_ = self.dummy_renderer
        SCREAMING_SNAKE_CASE_ = HeunDiscreteScheduler(
            beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=SCREAMING_SNAKE_CASE , clip_sample=SCREAMING_SNAKE_CASE , clip_sample_range=1.0 , )
        SCREAMING_SNAKE_CASE_ = {
            'prior': prior,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components
    def A_( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0 ) -> str:
        """Build deterministic call kwargs (device-appropriate generator).

        NOTE(review): both parameters share one mangled name — a SyntaxError;
        originally (device, seed=0).
        """
        if str(SCREAMING_SNAKE_CASE ).startswith('mps' ):
            SCREAMING_SNAKE_CASE_ = torch.manual_seed(SCREAMING_SNAKE_CASE )
        else:
            SCREAMING_SNAKE_CASE_ = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE )
        SCREAMING_SNAKE_CASE_ = {
            'prompt': 'horse',
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs
    def A_( self ) -> List[Any]:
        """Single CPU forward pass: output shape and a pinned pixel slice."""
        SCREAMING_SNAKE_CASE_ = 'cpu'
        SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
        SCREAMING_SNAKE_CASE_ = self.pipeline_class(**SCREAMING_SNAKE_CASE )
        SCREAMING_SNAKE_CASE_ = pipe.to(SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
        SCREAMING_SNAKE_CASE_ = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE ) )
        SCREAMING_SNAKE_CASE_ = output.images[0]
        SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        SCREAMING_SNAKE_CASE_ = np.array(
            [
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def A_( self ) -> List[str]:
        """Batch-consistency check across batch sizes 1 and 2."""
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def A_( self ) -> str:
        """Batched vs. single inference must agree (relaxed on GPU)."""
        SCREAMING_SNAKE_CASE_ = torch_device == 'cpu'
        SCREAMING_SNAKE_CASE_ = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE , relax_max_difference=SCREAMING_SNAKE_CASE , )
    def A_( self ) -> Optional[Any]:
        """num_images_per_prompt must multiply the output batch size."""
        SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
        SCREAMING_SNAKE_CASE_ = self.pipeline_class(**SCREAMING_SNAKE_CASE )
        SCREAMING_SNAKE_CASE_ = pipe.to(SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
        SCREAMING_SNAKE_CASE_ = 1
        SCREAMING_SNAKE_CASE_ = 2
        SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE )
        for key in inputs.keys():
            if key in self.batch_params:
                SCREAMING_SNAKE_CASE_ = batch_size * [inputs[key]]
        SCREAMING_SNAKE_CASE_ = pipe(**SCREAMING_SNAKE_CASE , num_images_per_prompt=SCREAMING_SNAKE_CASE )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
    """Slow GPU integration test for the full pretrained Shap-E pipeline.

    NOTE(review): both methods are named ``A_`` (mangled), so the second
    definition shadows the first; the first was originally ``tearDown`` (its
    ``super().tearDown()`` call shows that) and the second a ``test_*``
    method, so as written neither runs under unittest. Locals are collapsed
    to ``SCREAMING_SNAKE_CASE_``, leaving ``pipe`` and ``images`` undefined.
    """
    def A_( self ) -> List[str]:
        """Per-test cleanup (originally ``tearDown``): free Python and CUDA memory."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def A_( self ) -> List[str]:
        """Generate 'a shark' with the pretrained pipeline and compare against
        a reference rendering stored on the Hub."""
        SCREAMING_SNAKE_CASE_ = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_np_out.npy' )
        SCREAMING_SNAKE_CASE_ = ShapEPipeline.from_pretrained('openai/shap-e' )
        SCREAMING_SNAKE_CASE_ = pipe.to(SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
        SCREAMING_SNAKE_CASE_ = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(0 )
        SCREAMING_SNAKE_CASE_ = pipe(
            'a shark' , generator=SCREAMING_SNAKE_CASE , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
| 205 | 1 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def _a ( UpperCAmelCase ) -> Dict:
    """Build a ``SwinaSRConfig`` matching the checkpoint URL ``UpperCAmelCase``.

    Bug fixes vs. the mangled original: every value was rebound to one local
    placeholder and the function returned an undefined ``config``, and the
    conditions referenced an undefined ``checkpoint_url`` instead of the
    parameter. The config attribute names below are restored from the
    upstream Swin2SR conversion script — NOTE(review): confirm they match the
    installed ``Swin2SRConfig`` fields before use.
    """
    config = SwinaSRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in UpperCAmelCase:
        # Classical 4x super-resolution.
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in UpperCAmelCase:
        # 4x SR on compressed inputs with a smaller training patch.
        config.upscale = 4
        config.image_size = 48
        config.upsampler = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in UpperCAmelCase:
        # Lightweight 2x variant: shallower/narrower backbone.
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in UpperCAmelCase:
        # Real-world 4x SR with nearest+conv upsampling.
        config.upscale = 4
        config.upsampler = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in UpperCAmelCase:
        # JPEG artifact reduction: grayscale, no upscaling.
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ''
    return config
def rename_key(name, config):
    """Translate one original Swin2SR state-dict key into the HF Transformers naming scheme.

    *config* is only consulted for its ``upsampler`` attribute, which decides how the
    upsampling-head keys are mapped. Keys that are not part of an output head are
    prefixed with ``swin2sr.``.

    Fixes the obfuscated original, which had duplicate parameter names (a SyntaxError)
    and discarded every ``replace`` result into a throwaway local instead of ``name``.
    """
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict, config):
    """Rewrite an original Swin2SR state dict into HF Transformers key layout, in place.

    Fused ``qkv`` tensors are split into separate query/key/value entries (three equal
    slices of size ``config.embed_dim`` along dim 0); every other key is passed through
    :func:`rename_key`. Fixes the obfuscated original, which had duplicate parameter
    names (SyntaxError) and dropped every converted entry into a throwaway local.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # Key layout: "layers.<stage>.residual_group.blocks.<block>.attn.qkv.<w/b>"
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim
            prefix = f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swinasr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Convert an original Swin2SR checkpoint to HF format, verify outputs, optionally save/push.

    Args:
        checkpoint_url: URL of the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: local folder for the converted model, or ``None`` to skip saving.
        push_to_hub: when True, upload model and processor under ``caidas/<model_name>``.

    Restores the real variable names (processor, pixel_values, expected slices, ...) that
    the obfuscated original collapsed into throwaway locals, which made the function fail
    with NameError; it is renamed to ``convert_swinasr_checkpoint`` since the
    ``__main__`` block calls it under that name.
    """
    config = get_config(checkpoint_url)
    model = SwinaSRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = convert_state_dict(state_dict, config)
    # Buffers such as relative position indices are recomputed by the model, so
    # unexpected keys for them are tolerated; anything missing is a hard error.
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values on a real test image
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    # The JPEG variant was trained at 126x126; all others are verified at 256x256.
    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    # The JPEG artifact-reduction variant works on a single (grayscale) channel.
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # Expected shapes / top-left slices recorded from the original implementation.
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
    # CLI entry point for the Swin2SR checkpoint conversion. The obfuscated
    # original assigned the parser to `_A` but then called `parser.add_argument`
    # (NameError); the parser/args names are restored here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth',
        type=str,
        help='URL of the original Swin2SR checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.')

    args = parser.parse_args()
    convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 130 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE(TokenizerTesterMixin, unittest.TestCase):
    """LED tokenizer tests (slow and fast), driven by the shared TokenizerTesterMixin.

    NOTE(review): the obfuscated original inherited from the undefined name
    ``lowerCAmelCase_`` (the imported ``TokenizerTesterMixin`` is the intended base),
    named all three class attributes ``_UpperCAmelCase`` and every method
    ``__lowerCamelCase`` (so later definitions clobbered earlier ones and unittest
    discovered no tests). The conventional mixin hook names are restored here.
    """

    # Attribute names TokenizerTesterMixin expects.
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Minimal BPE vocabulary/merges sufficient to tokenize "lower newer".
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # No target text given, so no decoder-side tensors may be produced.
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            # Both encoder inputs and labels must be wrapped in <s> ... </s>.
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            # LED pads global_attention_mask entries with -1.
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        # Not relevant for LED; intentionally skipped.
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 130 | 1 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    """Builds tiny BioGpt configs/inputs and runs per-architecture checks.

    NOTE(review): the obfuscated original named this class ``__UpperCamelCase`` while
    the sibling test class instantiates ``BioGptModelTester(self)``, and every method
    was named ``UpperCAmelCase__`` (clobbering) while callers use the conventional
    ``prepare_config_and_inputs`` / ``create_and_check_*`` names; both are restored.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels plus a config, in the order the check methods expect."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # NOTE(review): the obfuscated source lost the is_decoder literal; False
        # matches the upstream BioGpt tests -- confirm against transformers.
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, *args
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask: mask out the second half of the sequence
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args
    ):
        model = BioGptModel(config=config).to(torch_device).eval()
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_forward_and_backwards(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        # Output projections are scaled down by sqrt(2 * n_layers) (GPT-2 style init).
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args
    ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + BioGpt-specific model tests.

    NOTE(review): the obfuscated original inherited from the undefined name
    ``lowerCAmelCase__`` three times (the mixins imported at the top of the file are
    the intended bases), named all four class attributes ``lowerCAmelCase_`` and every
    method ``UpperCAmelCase__`` (so unittest discovered nothing); the conventional
    names the mixins read are restored here.
    """

    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_biogpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            # Re-run the base model check under each position-embedding flavour.
            config_and_inputs[0].position_embedding_type = embedding_type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)

    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
    """Slow integration tests pinning real ``microsoft/biogpt`` outputs.

    NOTE(review): both methods carry the same obfuscated name, so only the
    second definition survives on the class (and neither starts with ``test_``,
    so unittest discovery skips them). Distinct ``test_*`` names would fix
    discovery; the visible interface is kept as-is here.
    """

    @slow
    def UpperCAmelCase__ ( self : Tuple ):
        """LM-head forward pass matches pinned logits for a fixed prompt."""
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]
        vocab_size = 42384
        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        # atol matches the 4-decimal precision of the hard-coded reference slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def UpperCAmelCase__ ( self : Any ):
        """Beam-search generation reproduces the pinned COVID-19 continuation."""
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        # assumes `torch_device` is imported at module top — TODO confirm
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
# fairseq moved model attributes behind `cfg` in 1.0.0a; older releases lack the
# `roberta.cfg.model.*` access pattern used below.
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
    raise Exception("requires fairseq >= 1.0.0a")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Sanity-check sentence fed through both models after conversion.
# (The obfuscated original bound both the logger and this constant to the same
# name, clobbering the logger.)
SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """Copy/paste/tweak a fairseq XLM-RoBERTa-XL checkpoint into our transformers structure.

    NOTE(review): reconstructed from the reference conversion script — the
    obfuscated original declared three parameters with the same name (a
    SyntaxError) and collapsed every assignment target into one identifier.
    The main guard below already calls this function by this name.

    Args:
        roberta_checkpoint_path: Path to the official fairseq checkpoint directory.
        pytorch_dump_folder_path: Where to save the converted transformers model.
        classification_head: Convert the "mnli" classification head instead of the LM head.

    Raises:
        Exception: if the converted model's outputs do not match fairseq's.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate — fairseq's first feed-forward projection is `fc1`;
        # the obfuscated original read `fca` for BOTH projections, which would
        # assert-fail (fc1 and fc2 have transposed shapes) and copy wrong weights.
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output — second feed-forward projection is `fc2`.
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    sample_text = "Hello world! cécé herlolip"  # inlined sanity-check sentence
    input_ids: torch.Tensor = roberta.encode(sample_text).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # (Reconstructed: the obfuscated original bound the parser and the parsed
    # args to the same clobbered name, and a stray dataset-residue token on the
    # last line broke the syntax.)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
# The builder below calls `logger.debug(...)`; the obfuscated original bound
# this logger to an unrelated name, leaving `logger` undefined.
logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for datasets built from a Spark DataFrame.

    The builder below already references ``SparkConfig`` and reads
    ``self.config.features``; the obfuscated original named the class ``a``
    and the attribute ``A_``, leaving both references dangling.
    """

    # Optional explicit feature schema; inferred from the DataFrame when None.
    features: Optional[datasets.Features] = None
def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    """Return a generator function yielding ``(key, example)`` pairs partition by partition.

    (Reconstructed: the obfuscated original declared both parameters with the
    same name — a SyntaxError — while its body already read ``df`` and
    ``partition_order``; ``SparkExamplesIterable`` below calls this function
    by this name.)
    """
    import pyspark

    def generate_fn():
        # Tag each row with its physical partition id so partitions can be
        # replayed deterministically in the requested order.
        df_with_partition_id = df.select("""*""", pyspark.sql.functions.spark_partition_id().alias("""part_id"""))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("""*""").where(f"""part_id = {partition_id}""").drop("""part_id""")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                # Keys are unique within the dataset: "<partition>_<row-in-partition>".
                yield f"""{partition_id}_{row_id}""", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    """Examples iterable that replays a Spark DataFrame partition by partition.

    (Reconstructed: the obfuscated original named the class ``a`` even though
    three sites already reference ``SparkExamplesIterable``, assigned its
    attributes to throwaway locals instead of ``self``, and gave every method
    the same name so only the last survived.)
    """

    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        """Return a copy that visits the partitions in a shuffled order."""
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        """Return a copy restricted to the partitions assigned to one worker."""
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        # One shard per partition in the replay order.
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    """DatasetBuilder that materializes a ``pyspark.sql.DataFrame`` as Arrow/Parquet shards.

    NOTE(review): reconstructed from the reference implementation — the
    obfuscated original was unrunnable (duplicate parameter names in most
    method signatures, assignments to throwaway locals instead of ``self``,
    ``uuid.uuida`` instead of ``uuid.uuid4``, and a missing ``shutil`` import).
    Verify against the upstream `datasets` Spark packaged module.
    """

    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            # The DataFrame's semantic hash makes the cache key content-addressed.
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        """Ensure cache_dir is reachable from Spark workers on multi-node clusters."""

        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        # A Spark DataFrame has no natural split; everything goes to TRAIN.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        """Repartition the DataFrame so each partition stays under max_shard_size (approximate)."""
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)

    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        """Write shards on Spark workers; yield per-task (examples, bytes, shards, lengths) stats."""
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    # Current shard is full: finalize it and roll over to a new one.
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)

            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                # Move shards from the worker-local working dir to the final location.
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda x: _rename_shard(*x)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
# Project Euler problem 31: in how many different ways can an amount of pence
# be made using any number of UK coins (1p, 2p, 5p, 10p, 20p, 50p, £1, £2)?
# Each helper counts the combinations whose largest coin is at most its own
# denomination. (The obfuscated original gave every function the same name
# while the recursive calls targeted these names, so nothing resolved.)


def one_pence() -> int:
    """Base case: a non-negative remainder is finished with 1p coins in exactly one way."""
    return 1


def two_pence(x: int) -> int:
    """Count combinations of coins <= 2p summing to ``x``."""
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    """Count combinations of coins <= 5p summing to ``x``."""
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    """Count combinations of coins <= 10p summing to ``x``."""
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    """Count combinations of coins <= 20p summing to ``x``."""
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    """Count combinations of coins <= 50p summing to ``x``."""
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    """Count combinations of coins <= 100p summing to ``x``."""
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    """Count combinations of coins <= 200p summing to ``x``."""
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(pence: int = 200) -> int:
    """Return the number of coin combinations summing to ``pence`` (default £2 -> 73682)."""
    return two_pound(pence)


if __name__ == "__main__":
    print(solution(int(input().strip())))
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint name -> config URL. (The obfuscated original bound the logger and
# this map to the same name, so the map silently clobbered the logger.)
VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
    'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
    'uclanlp/visualbert-vqa-coco-pre': (
        'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
    ),
    'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
    'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
    'uclanlp/visualbert-vcr-coco-pre': (
        'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
    ),
    'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
    'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
    'uclanlp/visualbert-nlvr2-coco-pre': (
        'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
    ),
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class A__ ( PretrainedConfig ):
    """Configuration for VisualBERT models (text + projected visual embeddings).

    NOTE(review): reconstructed — the obfuscated original declared every
    ``__init__`` parameter with the same name (a SyntaxError), inherited from
    an undefined name, and bound attributes to throwaway locals instead of
    ``self``. Defaults preserve the original positional default values.
    """

    # Registered model type consumed by the AutoConfig machinery.
    model_type = 'visual_bert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        # Token ids and any extra options are handled by PretrainedConfig.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint name -> config URL. (The obfuscated original bound the logger and
# this map to the same name, so the map silently clobbered the logger.)
VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
    'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
    'uclanlp/visualbert-vqa-coco-pre': (
        'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
    ),
    'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
    'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
    'uclanlp/visualbert-vcr-coco-pre': (
        'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
    ),
    'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
    'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
    'uclanlp/visualbert-nlvr2-coco-pre': (
        'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
    ),
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class A__ ( PretrainedConfig ):
    """Configuration for VisualBERT models (text + projected visual embeddings).

    NOTE(review): reconstructed — the obfuscated original declared every
    ``__init__`` parameter with the same name (a SyntaxError), inherited from
    an undefined name, and bound attributes to throwaway locals instead of
    ``self``. Defaults preserve the original positional default values.
    """

    # Registered model type consumed by the AutoConfig machinery.
    model_type = 'visual_bert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        # Token ids and any extra options are handled by PretrainedConfig.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure: module name -> public names it provides.
# (Restored: the obfuscated original bound every structure to one clobbered
# name, then passed an undefined `_import_structure` to _LazyModule and
# discarded the proxy instead of installing it in sys.modules.)
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    # Without sentencepiece the fast tokenizer has no slow counterpart.
    MBartTokenizer = None

logger = logging.get_logger(__name__)

# (Restored: the obfuscated original bound every constant below to one
# clobbered name, while the tokenizer class reads these real names.)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class _a ( SCREAMING_SNAKE_CASE ):
    '''Fast (`tokenizers`-backed) MBart tokenizer.

    Source text is encoded as ``X </s> [src_lang_code]`` and target text as
    ``X </s> [tgt_lang_code]`` — see the two ``set_*_lang_special_tokens``
    helpers at the bottom of the class.

    NOTE(review): identifiers throughout this block look machine-mangled
    (duplicate parameter names, locals bound to one throwaway name, reads of
    names never bound here); comments describe the apparent intent — verify
    against the upstream MBartTokenizerFast implementation.
    '''
    # NOTE(review): all seven class attributes are bound to the same name `A`,
    # so only the last assignment survives at runtime; upstream these are
    # vocab_files_names, max_model_input_sizes, pretrained_vocab_files_map,
    # model_input_names, slow_tokenizer_class, prefix_tokens, suffix_tokens.
    A : Optional[int] = VOCAB_FILES_NAMES
    A : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A : int = PRETRAINED_VOCAB_FILES_MAP
    A : Tuple = ['''input_ids''', '''attention_mask''']
    A : Tuple = MBartTokenizer
    A : List[int] = []
    A : List[int] = []
    # NOTE(review): every parameter below is named `A` — a SyntaxError as
    # written; upstream order is (vocab_file, tokenizer_file, bos, eos, sep,
    # cls, unk, pad, mask, src_lang, tgt_lang, additional_special_tokens).
    def __init__( self, A=None, A=None, A="<s>", A="</s>", A="</s>", A="<s>", A="<unk>", A="<pad>", A="<mask>", A=None, A=None, A=None, **A, ):
        '''Build the fast tokenizer and install MBart's language-code tokens.'''
        # The mask token is wrapped as an AddedToken that keeps leading space.
        SCREAMING_SNAKE_CASE : Union[str, Any] = AddedToken(A, lstrip=A, rstrip=A ) if isinstance(A, A ) else mask_token
        super().__init__(
            vocab_file=A, tokenizer_file=A, bos_token=A, eos_token=A, sep_token=A, cls_token=A, unk_token=A, pad_token=A, mask_token=A, src_lang=A, tgt_lang=A, additional_special_tokens=A, **A, )
        SCREAMING_SNAKE_CASE : str = vocab_file
        # A slow tokenizer can only be saved when the sentencepiece vocab
        # file is present.
        SCREAMING_SNAKE_CASE : str = False if not self.vocab_file else True
        SCREAMING_SNAKE_CASE : int = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
            self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
        # Map every fairseq language code to its token id.
        SCREAMING_SNAKE_CASE : Tuple = {
            lang_code: self.convert_tokens_to_ids(A ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        # Default source language is English when none was given.
        SCREAMING_SNAKE_CASE : Tuple = src_lang if src_lang is not None else 'en_XX'
        SCREAMING_SNAKE_CASE : int = self.convert_tokens_to_ids(self._src_lang )
        SCREAMING_SNAKE_CASE : str = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def UpperCamelCase_ ( self ):
        '''Return the current source-language code (e.g. ``"en_XX"``).'''
        return self._src_lang
    @src_lang.setter
    def UpperCamelCase_ ( self, A ):
        '''Set the source language and refresh the special-token template.'''
        SCREAMING_SNAKE_CASE : Optional[int] = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def UpperCamelCase_ ( self, A, A = None ):
        '''Wrap one or two token-id sequences with the MBart prefix/suffix tokens.'''
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
    def UpperCamelCase_ ( self, A, A = None ):
        '''Return an all-zero token-type-id mask (MBart does not use token types).'''
        SCREAMING_SNAKE_CASE : Tuple = [self.sep_token_id]
        SCREAMING_SNAKE_CASE : Optional[int] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def UpperCamelCase_ ( self, A, A, A, A, **A ):
        '''Tokenize raw inputs for translation and attach the forced BOS token id.'''
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        SCREAMING_SNAKE_CASE : Optional[Any] = src_lang
        SCREAMING_SNAKE_CASE : Optional[Any] = self(A, add_special_tokens=A, return_tensors=A, **A )
        SCREAMING_SNAKE_CASE : int = self.convert_tokens_to_ids(A )
        SCREAMING_SNAKE_CASE : str = tgt_lang_id
        return inputs
    def UpperCamelCase_ ( self, A, A = "en_XX", A = None, A = "ro_RO", **A, ):
        '''Prepare a batch for seq2seq training with the given language pair.'''
        SCREAMING_SNAKE_CASE : List[str] = src_lang
        SCREAMING_SNAKE_CASE : str = tgt_lang
        return super().prepare_seqaseq_batch(A, A, **A )
    def UpperCamelCase_ ( self ):
        '''Switch the tokenizer into source-language (input) mode.'''
        return self.set_src_lang_special_tokens(self.src_lang )
    def UpperCamelCase_ ( self ):
        '''Switch the tokenizer into target-language mode.'''
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def UpperCamelCase_ ( self, A ):
        '''Use an empty prefix and ``[eos, src_lang_code]`` as suffix.'''
        SCREAMING_SNAKE_CASE : List[Any] = self.convert_tokens_to_ids(A )
        SCREAMING_SNAKE_CASE : str = []
        SCREAMING_SNAKE_CASE : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
        # Rebuild the backend post-processor so the new template is applied.
        SCREAMING_SNAKE_CASE : Dict = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
    def UpperCamelCase_ ( self, A ):
        '''Use an empty prefix and ``[eos, tgt_lang_code]`` as suffix.'''
        SCREAMING_SNAKE_CASE : Any = self.convert_tokens_to_ids(A )
        SCREAMING_SNAKE_CASE : List[Any] = []
        SCREAMING_SNAKE_CASE : List[str] = [self.eos_token_id, self.cur_lang_code]
        SCREAMING_SNAKE_CASE : int = self.convert_ids_to_tokens(self.prefix_tokens )
        SCREAMING_SNAKE_CASE : Tuple = self.convert_ids_to_tokens(self.suffix_tokens )
        # Rebuild the backend post-processor so the new template is applied.
        SCREAMING_SNAKE_CASE : List[str] = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
    def UpperCamelCase_ ( self, A, A = None ):
        '''Copy the sentencepiece vocab file into ``save_directory`` and return its path.'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(A ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory." )
            return
        SCREAMING_SNAKE_CASE : List[str] = os.path.join(
            A, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        # Avoid copying the file onto itself.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
            copyfile(self.vocab_file, A )
        return (out_vocab_file,)
| 508 | 1 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
    """Return the whitespace-separated words of *lowerCAmelCase__* in reverse
    order, joined by single spaces.

    Fix: the original body read an undefined name ``input_str`` instead of
    the actual parameter, raising NameError on every call.
    """
    return " ".join(lowerCAmelCase__.split()[::-1] )
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 29 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=a_ )
class lowerCAmelCase_ ( a_ ):
    '''TaskTemplate for image-classification datasets: one ``image`` input
    column and one ``labels`` ClassLabel column.

    NOTE(review): the four field declarations below all bind the same mangled
    name ``__UpperCAmelCase`` (upstream: task, input_schema, label_schema,
    image_column, label_column) — only the last survives; confirm against the
    upstream ImageClassification task template.
    '''
    __UpperCAmelCase = field(default='image-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
    __UpperCAmelCase = Features({'image': Image()} )
    __UpperCAmelCase = Features({'labels': ClassLabel} )
    __UpperCAmelCase = "image"
    __UpperCAmelCase = "labels"
    def __snake_case ( self : int, _snake_case : Union[str, Any] ):
        '''Return a copy of this template whose label schema is aligned with
        the dataset's features.

        NOTE(review): the body reads ``features`` although the parameter is
        named ``_snake_case`` — mangled; it will raise NameError as written.
        '''
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column], _snake_case ):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        snake_case : List[str] =copy.deepcopy(self )
        snake_case : List[str] =self.label_schema.copy()
        snake_case : int =features[self.label_column]
        snake_case : List[Any] =label_schema
        return task_template
    @property
    def __snake_case ( self : str ):
        '''Map dataset column names to the task's canonical column names.'''
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 349 | 0 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
# NOTE(review): every constant below binds the same mangled name
# `_SCREAMING_SNAKE_CASE` (upstream: DIFFUSERS_PATH, REPO_PATH, spec,
# diffusers_module); later lines read DIFFUSERS_PATH/spec, which are never
# bound here — confirm against upstream utils/check_copies.py.
_SCREAMING_SNAKE_CASE = """src/diffusers"""
_SCREAMING_SNAKE_CASE = """."""
# This is to make sure the diffusers module imported is the one in the repo.
_SCREAMING_SNAKE_CASE = importlib.util.spec_from_file_location(
    """diffusers""",
    os.path.join(DIFFUSERS_PATH, """__init__.py"""),
    submodule_search_locations=[DIFFUSERS_PATH],
)
_SCREAMING_SNAKE_CASE = spec.loader.load_module()
def SCREAMING_SNAKE_CASE__ ( __a , __b ):
    """Return True while a source line still belongs to the current block.

    A line continues the block when it keeps the block's indentation (*__b*),
    is blank/trivial (length <= 1), or is a closing-paren line such as
    ``) -> T:`` / ``):``.

    Fixes: the original declared two parameters with the same name (a
    SyntaxError) and read an undefined name ``line``.
    """
    return __a.startswith(__b ) or len(__a ) <= 1 or re.search(r'^\s*\)(\s*->.*:|:)\s*$' , __a ) is not None
def SCREAMING_SNAKE_CASE__ ( __a ):
    """Locate and return the source text of *__a*, a dotted
    ``module.submodule.Object`` path, inside the diffusers source tree.

    NOTE(review): locals in this body are mangled — several assignments bind
    the throwaway name ``snake_case_`` while later lines read ``parts``, ``i``,
    ``module``, ``lines``, ``line_index``, ``indent`` and ``start_index``,
    which are never bound here; compare with the upstream
    ``find_code_in_diffusers`` before relying on behavior.
    """
    snake_case_ : Union[str, Any] = object_name.split('.' )
    snake_case_ : Tuple = 0
    # First let's find the module where our object lives.
    snake_case_ : List[Any] = parts[i]
    while i < len(__a ) and not os.path.isfile(os.path.join(__a , f"""{module}.py""" ) ):
        i += 1
        if i < len(__a ):
            snake_case_ : List[Any] = os.path.join(__a , parts[i] )
    if i >= len(__a ):
        raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
    with open(os.path.join(__a , f"""{module}.py""" ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
        snake_case_ : Optional[Any] = f.readlines()
    # Now let's find the class / func in the code!
    snake_case_ : List[Any] = ''
    snake_case_ : List[Any] = 0
    # Descend through the remaining dotted parts, one indentation level each.
    for name in parts[i + 1 :]:
        while (
            line_index < len(__a ) and re.search(rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
        ):
            line_index += 1
        indent += " "
        line_index += 1
    if line_index >= len(__a ):
        raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    snake_case_ : Any = line_index
    while line_index < len(__a ) and _should_continue(lines[line_index] , __a ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    snake_case_ : Tuple = lines[start_index:line_index]
    return "".join(__a )
# Patterns used by the copy-consistency checker:
#  1) a "# Copied from diffusers.<path>" marker (with optional replacements),
#  2) a "<old>-><new>" replacement directive,
#  3) a "<FILL ...>" placeholder.
# NOTE(review): all three bind the same mangled name `_SCREAMING_SNAKE_CASE`
# (upstream: _re_copy_warning, _re_replace_pattern, _re_fill_pattern).
_SCREAMING_SNAKE_CASE = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
_SCREAMING_SNAKE_CASE = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""")
_SCREAMING_SNAKE_CASE = re.compile(R"""<FILL\s+[^>]*>""")
def SCREAMING_SNAKE_CASE__ ( __a ):
    """Return the leading indentation of the first non-empty line of *__a*,
    or the empty string if *__a* has no non-empty line.

    Fixes: the original read undefined names (``code``, ``lines``, ``idx``)
    because its assignments bound a throwaway local instead.
    """
    lines = __a.split('\n' )
    idx = 0
    # Skip completely empty leading lines.
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(r'^(\s*)\S' , lines[idx] ).groups()[0]
    return ""
def SCREAMING_SNAKE_CASE__ ( __a ):
    """Run `black` (plus docstring styling) over a code snippet, temporarily
    wrapping indented snippets in a dummy class so black accepts them.

    NOTE(review): mangled — assignments bind ``snake_case_`` while later
    lines read ``has_indent``/``code``/``mode``/``result``, and
    ``black.TargetVersion.PYaa`` looks like a corrupted enum member
    (upstream: ``PY37``); compare with upstream ``blackify``.
    """
    snake_case_ : str = len(get_indent(__a ) ) > 0
    if has_indent:
        snake_case_ : List[Any] = f"""class Bla:\n{code}"""
    snake_case_ : List[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 , preview=__a )
    snake_case_ : Tuple = black.format_str(__a , mode=__a )
    snake_case_ ,snake_case_ : int = style_docstrings_in_code(__a )
    # Strip the dummy wrapper back off when one was added.
    return result[len('class Bla:\n' ) :] if has_indent else result
def SCREAMING_SNAKE_CASE__ ( __a , __a=False ):
    """Check every "# Copied from" block in a file against its origin and
    return the list of ``[object_name, line_index]`` diffs found; when the
    second argument (overwrite) is truthy, rewrite the file in place.

    NOTE(review): heavily mangled — duplicate parameter names (a SyntaxError
    as written), assignments bind ``snake_case_`` while later lines read
    ``lines``/``diffs``/``line_index``/``search``/``theoretical_code`` etc.,
    and the two f-strings below contain a literal ``(unknown)`` where the
    upstream code interpolated the filename. Compare with upstream
    ``is_copy_consistent`` before relying on behavior.
    """
    with open(__a , 'r' , encoding='utf-8' , newline='\n' ) as f:
        snake_case_ : Any = f.readlines()
    snake_case_ : List[str] = []
    snake_case_ : Any = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(__a ):
        snake_case_ : List[Any] = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        snake_case_ ,snake_case_ ,snake_case_ : Union[str, Any] = search.groups()
        snake_case_ : Union[str, Any] = find_code_in_diffusers(__a )
        snake_case_ : Tuple = get_indent(__a )
        snake_case_ : Optional[int] = line_index + 1 if indent == theoretical_indent else line_index + 2
        snake_case_ : str = theoretical_indent
        snake_case_ : str = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        snake_case_ : Tuple = True
        while line_index < len(__a ) and should_continue:
            line_index += 1
            if line_index >= len(__a ):
                break
            snake_case_ : List[Any] = lines[line_index]
            snake_case_ : Union[str, Any] = _should_continue(__a , __a ) and re.search(f"""^{indent}# End copy""" , __a ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        snake_case_ : int = lines[start_index:line_index]
        snake_case_ : Dict = ''.join(__a )
        # Remove any nested `Copied from` comments to avoid circular copies
        snake_case_ : Any = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(__a ) is None]
        snake_case_ : Tuple = '\n'.join(__a )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(__a ) > 0:
            snake_case_ : Optional[int] = replace_pattern.replace('with' , '' ).split(',' )
            snake_case_ : str = [_re_replace_pattern.search(__a ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                snake_case_ ,snake_case_ ,snake_case_ : str = pattern.groups()
                snake_case_ : Optional[Any] = re.sub(__a , __a , __a )
                if option.strip() == "all-casing":
                    # Apply the replacement in lower- and upper-case as well.
                    snake_case_ : Optional[int] = re.sub(obja.lower() , obja.lower() , __a )
                    snake_case_ : Tuple = re.sub(obja.upper() , obja.upper() , __a )
        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        snake_case_ : str = blackify(lines[start_index - 1] + theoretical_code )
        snake_case_ : Dict = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                snake_case_ : str = lines[:start_index] + [theoretical_code] + lines[line_index:]
                snake_case_ : int = start_index + 1
    if overwrite and len(__a ) > 0:
        # Warn the user a file has been modified.
        print(f"""Detected changes, rewriting (unknown).""" )
        with open(__a , 'w' , encoding='utf-8' , newline='\n' ) as f:
            f.writelines(__a )
    return diffs
def SCREAMING_SNAKE_CASE__ ( __a = False ):
    """Run the copy-consistency check over every ``*.py`` file under the
    diffusers path and raise if any inconsistencies are found (unless the
    argument requests overwriting them in place).

    NOTE(review): mangled — assignments bind ``snake_case_`` while later
    lines read ``all_files``/``diffs``/``new_diffs``/``overwrite``/``diff``,
    ``is_copy_consistent(__a , __a )`` passes the same value twice, and the
    f-string below contains a literal ``(unknown)`` where upstream
    interpolated the filename. Compare with upstream ``check_copies``.
    """
    snake_case_ : Dict = glob.glob(os.path.join(__a , '**/*.py' ) , recursive=__a )
    snake_case_ : int = []
    for filename in all_files:
        snake_case_ : Union[str, Any] = is_copy_consistent(__a , __a )
        diffs += [f"""- (unknown): copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
    if not overwrite and len(__a ) > 0:
        snake_case_ : List[Any] = '\n'.join(__a )
        raise Exception(
            'Found the following copy inconsistencies:\n'
            + diff
            + '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
# CLI entry point: parse --fix_and_overwrite and run the consistency check.
# NOTE(review): `parser`, `args` and `check_copies` are read below but never
# bound in this file (the assignments bind `_SCREAMING_SNAKE_CASE` and the
# function above is mangled) — this guard raises NameError as written.
if __name__ == "__main__":
    _SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    _SCREAMING_SNAKE_CASE = parser.parse_args()
    check_copies(args.fix_and_overwrite)
| 534 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
# Load the FSMT validation data (src/tgt sentence pairs per language pair)
# used by the BLEU regression test below.
# NOTE(review): `filename` is read but never bound (the path is assigned to
# the mangled name `_SCREAMING_SNAKE_CASE`) — this raises NameError at import.
_SCREAMING_SNAKE_CASE = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
    _SCREAMING_SNAKE_CASE = json.load(f)
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Slow regression test: translate the FSMT validation set for four
    language pairs and assert the corpus BLEU meets a per-pair floor.

    NOTE(review): method results are read through names the mangled
    assignments never bind (e.g. ``bleu_data``, ``scores``); compare with
    the upstream FSMT BLEU test.
    """

    def UpperCAmelCase_ ( self : Union[str, Any] , _A : Any ) -> Optional[Any]:
        """Load the FSMT tokenizer for the given checkpoint name."""
        return FSMTTokenizer.from_pretrained(_A )
    def UpperCAmelCase_ ( self : Dict , _A : Union[str, Any] ) -> Optional[Any]:
        """Load the FSMT model (half precision on CUDA) for the checkpoint."""
        snake_case_ : Tuple = FSMTForConditionalGeneration.from_pretrained(_A ).to(_A )
        if torch_device == "cuda":
            model.half()
        return model
    @parameterized.expand(
        [
            ['en-ru', 2_6.0],
            ['ru-en', 2_2.0],
            ['en-de', 2_2.0],
            ['de-en', 2_9.0],
        ] )
    @slow
    def UpperCAmelCase_ ( self : int , _A : Optional[Any] , _A : Optional[Any] ) -> Union[str, Any]:
        """Generate translations with beam search and check BLEU >= floor."""
        snake_case_ : Tuple = F"""facebook/wmt19-{pair}"""
        snake_case_ : List[Any] = self.get_tokenizer(_A )
        snake_case_ : List[str] = self.get_model(_A )
        snake_case_ : Union[str, Any] = bleu_data[pair]['src']
        snake_case_ : List[str] = bleu_data[pair]['tgt']
        snake_case_ : Optional[int] = tokenizer(_A , return_tensors='pt' , truncation=_A , padding='longest' ).to(_A )
        snake_case_ : List[Any] = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        snake_case_ : Dict = tokenizer.batch_decode(
            _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A )
        snake_case_ : Tuple = calculate_bleu(_A , _A )
        print(_A )
        self.assertGreaterEqual(scores['bleu'] , _A )
| 534 | 1 |
'''simple docstring'''
def lowercase (_A ):
    """Return every permutation of *_A* as a list of tuples, generated with
    the iterative form of Heap's algorithm (the input list is permuted in
    place while generating).

    Fixes over the original: the inner helper declared two parameters with
    the same name (a SyntaxError as written), the element swaps were
    assigned to throwaway locals instead of back into the array, and the
    body read names (``n``, ``c``, ``res``, ``i``, ``arr``) that were never
    bound.
    """
    if len(_A ) <= 1:
        return [tuple(_A )]
    res = []

    def generate(n , arr ):
        # c[i] counts how many swaps have been performed at position i.
        c = [0] * n
        res.append(tuple(arr ) )
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    # Even i: swap position i with the first element.
                    arr[i], arr[0] = arr[0], arr[i]
                else:
                    # Odd i: swap position i with position c[i].
                    arr[i], arr[c[i]] = arr[c[i]], arr[i]
                res.append(tuple(arr ) )
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(_A ) , _A )
    return res
# CLI entry point: read comma-separated ints and print their permutations.
# NOTE(review): `user_input`, `heaps` and `arr` are read below but never
# bound (the assignments bind the mangled name `lowerCAmelCase` and the
# function above is defined as `lowercase`) — this guard raises NameError.
if __name__ == "__main__":
    lowerCAmelCase : int = input("""Enter numbers separated by a comma:\n""").strip()
    lowerCAmelCase : Optional[int] = [int(item) for item in user_input.split(""",""")]
    print(heaps(arr))
| 444 |
'''simple docstring'''
def lowercase (_A ):
    """Return every permutation of *_A* as a list of tuples, generated with
    the iterative form of Heap's algorithm (the input list is permuted in
    place while generating).

    Fixes over the original: the inner helper declared two parameters with
    the same name (a SyntaxError as written), the element swaps were
    assigned to throwaway locals instead of back into the array, and the
    body read names (``n``, ``c``, ``res``, ``i``, ``arr``) that were never
    bound.
    """
    if len(_A ) <= 1:
        return [tuple(_A )]
    res = []

    def generate(n , arr ):
        # c[i] counts how many swaps have been performed at position i.
        c = [0] * n
        res.append(tuple(arr ) )
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    # Even i: swap position i with the first element.
                    arr[i], arr[0] = arr[0], arr[i]
                else:
                    # Odd i: swap position i with position c[i].
                    arr[i], arr[c[i]] = arr[c[i]], arr[i]
                res.append(tuple(arr ) )
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(_A ) , _A )
    return res
# CLI entry point: read comma-separated ints and print their permutations.
# NOTE(review): `user_input`, `heaps` and `arr` are read below but never
# bound (the assignments bind the mangled name `lowerCAmelCase` and the
# function above is defined as `lowercase`) — this guard raises NameError.
if __name__ == "__main__":
    lowerCAmelCase : int = input("""Enter numbers separated by a comma:\n""").strip()
    lowerCAmelCase : Optional[int] = [int(item) for item in user_input.split(""",""")]
    print(heaps(arr))
| 444 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger and the map of pretrained checkpoints to their config URLs.
# NOTE(review): both constants bind the same mangled name `snake_case__`
# (upstream: logger, DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP).
snake_case__ : Optional[int] = logging.get_logger(__name__)
snake_case__ : Optional[Any] = {
    """facebook/data2vec-vision-base-ft""": (
        """https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
    ),
}
class _a ( UpperCAmelCase__ ):
    """Configuration class for Data2Vec-Vision models (model type
    ``data2vec-vision``): ViT-style encoder hyperparameters plus semantic
    segmentation decode/auxiliary-head options.

    NOTE(review): every ``__init__`` parameter below is named
    ``_UpperCAmelCase`` — duplicate parameter names are a SyntaxError as
    written, and every attribute is then assigned from that single name;
    the intended parameter order matches the attribute assignments (confirm
    against the upstream Data2VecVisionConfig). The two list defaults are
    also mutable defaults shared across calls.
    """

    A_ = """data2vec-vision"""
    def __init__( self , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-12 , _UpperCAmelCase=224 , _UpperCAmelCase=16 , _UpperCAmelCase=3 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=True , _UpperCAmelCase=[3, 5, 7, 11] , _UpperCAmelCase=[1, 2, 3, 6] , _UpperCAmelCase=True , _UpperCAmelCase=0.4 , _UpperCAmelCase=256 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=255 , **_UpperCAmelCase , ) -> Any:
        super().__init__(**_UpperCAmelCase )
        # Encoder hyperparameters.
        UpperCamelCase_ = hidden_size
        UpperCamelCase_ = num_hidden_layers
        UpperCamelCase_ = num_attention_heads
        UpperCamelCase_ = intermediate_size
        UpperCamelCase_ = hidden_act
        UpperCamelCase_ = hidden_dropout_prob
        UpperCamelCase_ = attention_probs_dropout_prob
        UpperCamelCase_ = initializer_range
        UpperCamelCase_ = layer_norm_eps
        # Patch-embedding / input options.
        UpperCamelCase_ = image_size
        UpperCamelCase_ = patch_size
        UpperCamelCase_ = num_channels
        UpperCamelCase_ = use_mask_token
        UpperCamelCase_ = use_absolute_position_embeddings
        UpperCamelCase_ = use_relative_position_bias
        UpperCamelCase_ = use_shared_relative_position_bias
        UpperCamelCase_ = layer_scale_init_value
        UpperCamelCase_ = drop_path_rate
        UpperCamelCase_ = use_mean_pooling
        # decode head attributes (semantic segmentation)
        UpperCamelCase_ = out_indices
        UpperCamelCase_ = pool_scales
        # auxiliary head attributes (semantic segmentation)
        UpperCamelCase_ = use_auxiliary_head
        UpperCamelCase_ = auxiliary_loss_weight
        UpperCamelCase_ = auxiliary_channels
        UpperCamelCase_ = auxiliary_num_convs
        UpperCamelCase_ = auxiliary_concat_input
        UpperCamelCase_ = semantic_loss_ignore_index
class _a ( UpperCAmelCase__ ):
    """ONNX export configuration for Data2Vec-Vision: declares the
    ``pixel_values`` input axes and the validation tolerance.

    NOTE(review): both properties below share the same (mangled) name
    ``_UpperCAmelCase``, so the second definition overrides the first in the
    class namespace — upstream these are ``inputs`` and
    ``atol_for_validation``.
    """

    # Minimum ONNX opset/transformers version required for export.
    A_ = version.parse("""1.11""" )
    @property
    def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes of the single image input.
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def _UpperCAmelCase ( self ) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1e-4
| 618 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _a :
    """Test helper that builds small random ViTMSN configs/inputs and runs
    shape checks on ViTMSNModel / ViTMSNForImageClassification.

    NOTE(review): the ``__init__`` below declares many parameters all named
    ``_UpperCAmelCase`` (a SyntaxError as written) and assigns every
    attribute through the mangled name ``UpperCamelCase_``; the assignment
    order documents the intended parameter order.
    """

    def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=30 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=None , ) -> Optional[Any]:
        UpperCamelCase_ = parent
        UpperCamelCase_ = batch_size
        UpperCamelCase_ = image_size
        UpperCamelCase_ = patch_size
        UpperCamelCase_ = num_channels
        UpperCamelCase_ = is_training
        UpperCamelCase_ = use_labels
        UpperCamelCase_ = hidden_size
        UpperCamelCase_ = num_hidden_layers
        UpperCamelCase_ = num_attention_heads
        UpperCamelCase_ = intermediate_size
        UpperCamelCase_ = hidden_act
        UpperCamelCase_ = hidden_dropout_prob
        UpperCamelCase_ = attention_probs_dropout_prob
        UpperCamelCase_ = type_sequence_label_size
        UpperCamelCase_ = initializer_range
        UpperCamelCase_ = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        UpperCamelCase_ = (image_size // patch_size) ** 2
        UpperCamelCase_ = num_patches + 1
    def _UpperCAmelCase ( self ) -> Tuple:
        """Create random pixel values (and labels, if enabled) plus a config."""
        UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCamelCase_ = None
        if self.use_labels:
            UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        UpperCamelCase_ = self.get_config()
        return config, pixel_values, labels
    def _UpperCAmelCase ( self ) -> int:
        """Build a ViTMSNConfig from the tester's hyperparameters."""
        return ViTMSNConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
        """Check the base model's last_hidden_state shape."""
        UpperCamelCase_ = ViTMSNModel(config=_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        UpperCamelCase_ = model(_UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str:
        """Check classification logits shape, incl. the greyscale case."""
        UpperCamelCase_ = self.type_sequence_label_size
        UpperCamelCase_ = ViTMSNForImageClassification(_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        UpperCamelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
        # NOTE(review): missing f-string prefixes — the braces below are
        # printed literally rather than interpolated.
        print('Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
        print('Labels: {labels}' )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        UpperCamelCase_ = 1
        UpperCamelCase_ = ViTMSNForImageClassification(_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        UpperCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCamelCase_ = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def _UpperCAmelCase ( self ) -> Dict:
        """Repackage prepared inputs as the common (config, inputs_dict) pair."""
        UpperCamelCase_ = self.prepare_config_and_inputs()
        UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
        UpperCamelCase_ = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
    """Common ModelTester/PipelineTester suite for ViTMSN: config tests,
    signature/embedding checks, shape checks and slow from_pretrained.

    NOTE(review): the class attributes below all bind the same mangled name
    ``A_`` (upstream: all_model_classes, pipeline_model_mapping,
    test_pruning, test_torchscript, test_resize_embeddings,
    test_head_masking) — only the last assignment survives at runtime.
    """

    A_ = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    A_ = (
        {"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    A_ = False
    A_ = False
    A_ = False
    A_ = False
    def _UpperCAmelCase ( self ) -> Dict:
        """Set up the model tester and a text-free config tester."""
        UpperCamelCase_ = ViTMSNModelTester(self )
        UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
    def _UpperCAmelCase ( self ) -> List[Any]:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='ViTMSN does not use inputs_embeds' )
    def _UpperCAmelCase ( self ) -> Optional[Any]:
        pass
    def _UpperCAmelCase ( self ) -> List[str]:
        """Input embeddings exist; output embeddings are absent or Linear."""
        UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase_ = model_class(_UpperCAmelCase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            UpperCamelCase_ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
    def _UpperCAmelCase ( self ) -> List[str]:
        """The forward signature's first argument must be pixel_values."""
        UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase_ = model_class(_UpperCAmelCase )
            UpperCamelCase_ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase_ = [*signature.parameters.keys()]
            UpperCamelCase_ = ['pixel_values']
            self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
    def _UpperCAmelCase ( self ) -> Any:
        """Shape check for the base model."""
        UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCAmelCase )
    def _UpperCAmelCase ( self ) -> List[Any]:
        """Shape check for the classification head."""
        UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
    @slow
    def _UpperCAmelCase ( self ) -> Dict:
        """Smoke-test loading the pretrained checkpoint."""
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase_ = ViTMSNModel.from_pretrained(_UpperCAmelCase )
            self.assertIsNotNone(_UpperCAmelCase )
def _snake_case ():
    """Load and return the standard COCO fixture image used by the
    integration tests below.

    Fix: the original assigned the opened image to a throwaway name and then
    returned an undefined variable ``image`` (NameError); return the opened
    image directly instead.
    """
    return Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
@require_torch
@require_vision
class _a ( unittest.TestCase ):
    """Slow integration test: run the pretrained facebook/vit-msn-small
    classifier on the COCO fixture image and compare logits against known
    reference values.
    """

    @cached_property
    def _UpperCAmelCase ( self ) -> Union[str, Any]:
        """Image processor matching the checkpoint (None without vision deps)."""
        return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
    @slow
    def _UpperCAmelCase ( self ) -> List[str]:
        # Fixed seed so the (randomly initialized) classifier head is reproducible.
        torch.manual_seed(2 )
        UpperCamelCase_ = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(_UpperCAmelCase )
        UpperCamelCase_ = self.default_image_processor
        UpperCamelCase_ = prepare_img()
        UpperCamelCase_ = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
        # forward pass
        with torch.no_grad():
            UpperCamelCase_ = model(**_UpperCAmelCase )
        # verify the logits
        UpperCamelCase_ = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
        UpperCamelCase_ = torch.tensor([-0.0_8_0_3, -0.4_4_5_4, -0.2_3_7_5] ).to(_UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
| 618 | 1 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# Fixture sentencepiece model plus the MBart language-code token ids.
# NOTE(review): all three constants bind the same mangled name
# `SCREAMING_SNAKE_CASE__` (upstream: SAMPLE_VOCAB, EN_CODE=250004,
# RO_CODE=250020); the tests below read EN_CODE/RO_CODE, which are never
# bound here.
SCREAMING_SNAKE_CASE__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right
SCREAMING_SNAKE_CASE__ = 25_0004
SCREAMING_SNAKE_CASE__ = 25_0020
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase( __lowerCamelCase , unittest.TestCase ):
    """Tokenizer test suite for MBart (slow + fast): full-tokenizer round
    trips against the sentencepiece fixture, and save/load equivalence
    between the Python and Rust implementations.

    NOTE(review): the class attributes below all bind the same mangled name
    (upstream: tokenizer_class, rust_tokenizer_class, test_rust_tokenizer,
    test_sentencepiece) — only the last assignment survives at runtime.
    """

    __SCREAMING_SNAKE_CASE : List[str] = MBartTokenizer
    __SCREAMING_SNAKE_CASE : Tuple = MBartTokenizerFast
    __SCREAMING_SNAKE_CASE : Optional[int] = True
    __SCREAMING_SNAKE_CASE : Tuple = True
    def __lowerCAmelCase ( self : Optional[Any] ):
        '''Create a tokenizer from the sentencepiece fixture and save it to tmp.'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        __a : Any = MBartTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
        tokenizer.save_pretrained(self.tmpdirname )
    def __lowerCAmelCase ( self : Dict ):
        '''Check tokenize/convert round trips against known fixture outputs.'''
        __a : List[Any] = MBartTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
        __a : List[Any] = tokenizer.tokenize('This is a test' )
        self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        __a : Optional[int] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            SCREAMING_SNAKE_CASE__ , [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ] , )
        __a : Optional[Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
        self.assertListEqual(
            SCREAMING_SNAKE_CASE__ , [
                value + tokenizer.fairseq_offset
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ] , )
        __a : List[str] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
        self.assertListEqual(
            SCREAMING_SNAKE_CASE__ , [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '<unk>',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '<unk>',
                '.',
            ] , )
    def __lowerCAmelCase ( self : str ):
        '''Verify slow/fast tokenizers save and reload equivalently (legacy
        and non-legacy formats).'''
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        __a : Tuple = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                __a : Tuple = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
                __a : Dict = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
                __a : str = tempfile.mkdtemp()
                __a : Any = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ )
                __a : Dict = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                __a : List[str] = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
                self.assertSequenceEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
                # Checks everything loads correctly in the same way
                __a : Dict = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
                __a : List[Any] = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(SCREAMING_SNAKE_CASE__ )
                # Save tokenizer rust, legacy_format=True
                __a : Optional[Any] = tempfile.mkdtemp()
                __a : Optional[Any] = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ , legacy_format=SCREAMING_SNAKE_CASE__ )
                __a : int = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
                # Checks it save with the same files
                self.assertSequenceEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
                # Checks everything loads correctly in the same way
                __a : Optional[Any] = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
                __a : Optional[int] = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
                shutil.rmtree(SCREAMING_SNAKE_CASE__ )
                # Save tokenizer rust, legacy_format=False
                __a : Tuple = tempfile.mkdtemp()
                __a : List[Any] = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ , legacy_format=SCREAMING_SNAKE_CASE__ )
                __a : Tuple = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                __a : Union[str, Any] = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
                __a : Union[str, Any] = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
                shutil.rmtree(SCREAMING_SNAKE_CASE__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase(unittest.TestCase):
    """Integration tests for the MBart en->ro tokenizer against the original fairseq behaviour.

    NOTE(review): an automated rename clobbered this class (every attribute became
    ``__SCREAMING_SNAKE_CASE`` and every method ``__lowerCAmelCase``, so only the last
    of each survived).  Names are reconstructed from their use sites
    (``cls.checkpoint_name``, ``self.src_text``, ``self.expected_src_tokens``, ...).
    """

    # Checkpoint and parallel en/ro sentences shared by every test below.
    checkpoint_name = 'facebook/mbart-large-en-ro'
    src_text = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    # Expected ids for src_text[0]; the language code EN_CODE is appended last.
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        """Load the tokenizer once for the whole class (downloading it is slow)."""
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='en_XX', tgt_lang='ro_RO')
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        """Language-code tokens must map to the same ids fairseq assigns them."""
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        # Decoding with or without the leading language code must give the same text.
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ['this is gunna be a long sentence ' * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        # Truncated sequences still end with <eos> followed by the language code.
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR']), [250026, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors='pt')
        batch['decoder_input_ids'] = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True,
            max_length=len(self.expected_src_tokens), return_tensors='pt')
        batch['decoder_input_ids'] = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)
        # NOTE(review): the isinstance target was obfuscated; BatchEncoding matches the
        # upstream test — confirm the import exists at the file head.
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors='pt')
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors='pt')
        labels = targets['input_ids']
        batch['decoder_input_ids'] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            'A test', return_tensors='pt', src_lang='en_XX', tgt_lang='ar_AR')
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                'input_ids': [[62, 3034, 2, 250004]],
                'attention_mask': [[1, 1, 1, 1]],
                # ar_AR
                'forced_bos_token_id': 250001,
            })
| 47 |
def UpperCAmelCase__(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """Solve two linear equations in two unknowns via Cramer's rule.

    Each equation is given as ``[a, b, c]`` representing ``a*x + b*y = c``.

    Returns:
        ``(x, y)`` as floats, or ``(0.0, 0.0)`` for the trivial solution.

    Raises:
        ValueError: if an equation does not have exactly 3 coefficients, if both
            equations have zero ``a`` and ``b`` coefficients, or if the system has
            no unique non-trivial answer (inconsistent, or infinitely many solutions).

    NOTE(review): the original had both parameters obfuscated to the same name (a
    SyntaxError) and the validity check read only equation 1's coefficients; both
    are repaired here.
    """
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError('Please enter a valid equation.')
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.')

    # Extract the coefficients of a*x + b*y = c for each equation.
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)')
        raise ValueError('No solution. (Inconsistent system)')
    if determinant_x == determinant_y == 0:
        # Trivial solution (both right-hand sides zero).
        return (0.0, 0.0)
    x = determinant_x / determinant
    y = determinant_y / determinant
    # Non-Trivial Solution (Consistent system)
    return (x, y)
| 47 | 1 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class lowercase__ ( A ):
    """Output container returned by the Flax UNet defined below.

    NOTE(review): the single field's name/annotation were obfuscated to
    ``_UpperCAmelCase = 42``; upstream-style code would declare it as a
    ``jnp.ndarray`` sample — confirm against the original source.
    """
    # Predicted sample tensor from the model's forward pass (see the ``sample=``
    # keyword used at the end of ``__call__`` below).
    _UpperCAmelCase = 42
@flax_register_to_config
class lowercase__ ( nn.Module, A, A ):
    """Flax conditional 2-D UNet: config fields, parameter init, block setup, forward pass.

    NOTE(review): an automated rename has clobbered this class — every config field is
    assigned to the same name ``_UpperCAmelCase`` (so only the last assignment survives),
    the base list repeats ``A`` (duplicate bases raise TypeError at class creation), and
    many call sites pass the placeholder ``snake_case`` where distinct locals were
    intended (e.g. ``jnp.zeros(snake_case, ...)`` receives the RNG argument rather than
    the shape built on the previous line).  The code is left byte-identical here;
    restoring the intended names requires the upstream source.
    """
    # Config fields (original names lost; values follow a diffusers-style UNet:
    # sample size, in/out channels, down/up block types, block_out_channels, etc.).
    _UpperCAmelCase = 32
    _UpperCAmelCase = 4
    _UpperCAmelCase = 4
    _UpperCAmelCase = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    _UpperCAmelCase = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    _UpperCAmelCase = False
    _UpperCAmelCase = (3_20, 6_40, 12_80, 12_80)
    _UpperCAmelCase = 2
    _UpperCAmelCase = 8
    _UpperCAmelCase = None
    _UpperCAmelCase = 12_80
    _UpperCAmelCase = 0.0
    _UpperCAmelCase = False
    _UpperCAmelCase = jnp.floataa
    _UpperCAmelCase = True
    _UpperCAmelCase = 0
    _UpperCAmelCase = False

    def lowerCamelCase_ ( self , snake_case ) -> FrozenDict:
        # Initialize model parameters by tracing one forward pass with dummy inputs.
        # init input tensors
        _UpperCAmelCase = (1, self.in_channels, self.sample_size, self.sample_size)
        # NOTE(review): ``snake_case`` here is the method's RNG parameter, not the shape
        # tuple built above — a casualty of the renaming; confirm against upstream.
        _UpperCAmelCase = jnp.zeros(snake_case , dtype=jnp.floataa )
        _UpperCAmelCase = jnp.ones((1,) , dtype=jnp.intaa )
        _UpperCAmelCase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
        _UpperCAmelCase , _UpperCAmelCase = jax.random.split(snake_case )
        _UpperCAmelCase = {'params': params_rng, 'dropout': dropout_rng}
        return self.init(snake_case , snake_case , snake_case , snake_case )["params"]

    def lowerCamelCase_ ( self ) -> List[str]:
        # Build all submodules (Flax ``setup``-style): conv_in, time embedding,
        # down blocks, mid block, up blocks, and the output head.
        _UpperCAmelCase = self.block_out_channels
        _UpperCAmelCase = block_out_channels[0] * 4
        if self.num_attention_heads is not None:
            raise ValueError(
                'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.' )
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        _UpperCAmelCase = self.num_attention_heads or self.attention_head_dim
        # input
        _UpperCAmelCase = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        _UpperCAmelCase = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        _UpperCAmelCase = FlaxTimestepEmbedding(snake_case , dtype=self.dtype )
        _UpperCAmelCase = self.only_cross_attention
        # Broadcast scalar settings to one entry per down block.
        if isinstance(snake_case , snake_case ):
            _UpperCAmelCase = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(snake_case , snake_case ):
            _UpperCAmelCase = (num_attention_heads,) * len(self.down_block_types )
        # down
        _UpperCAmelCase = []
        _UpperCAmelCase = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types ):
            _UpperCAmelCase = output_channel
            _UpperCAmelCase = block_out_channels[i]
            _UpperCAmelCase = i == len(snake_case ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                _UpperCAmelCase = FlaxCrossAttnDownBlockaD(
                    in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                _UpperCAmelCase = FlaxDownBlockaD(
                    in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(snake_case )
        _UpperCAmelCase = down_blocks
        # mid
        _UpperCAmelCase = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
        # up
        _UpperCAmelCase = []
        _UpperCAmelCase = list(reversed(snake_case ) )
        _UpperCAmelCase = list(reversed(snake_case ) )
        _UpperCAmelCase = list(reversed(snake_case ) )
        _UpperCAmelCase = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types ):
            _UpperCAmelCase = output_channel
            _UpperCAmelCase = reversed_block_out_channels[i]
            # Skip-connection channel count comes from the *next* (shallower) level.
            _UpperCAmelCase = reversed_block_out_channels[min(i + 1 , len(snake_case ) - 1 )]
            _UpperCAmelCase = i == len(snake_case ) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                _UpperCAmelCase = FlaxCrossAttnUpBlockaD(
                    in_channels=snake_case , out_channels=snake_case , prev_output_channel=snake_case , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                _UpperCAmelCase = FlaxUpBlockaD(
                    in_channels=snake_case , out_channels=snake_case , prev_output_channel=snake_case , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
            up_blocks.append(snake_case )
            _UpperCAmelCase = output_channel
        _UpperCAmelCase = up_blocks
        # out
        _UpperCAmelCase = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
        _UpperCAmelCase = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

    def __call__( self , snake_case , snake_case , snake_case , snake_case=None , snake_case=None , snake_case = True , snake_case = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
        # NOTE(review): all seven parameters share the obfuscated name ``snake_case``
        # (duplicate parameter names cannot compile).  The body's use sites suggest the
        # upstream order (sample, timesteps, encoder_hidden_states,
        # down_block_additional_residuals, mid_block_additional_residual, return_dict,
        # train) — confirm before restoring.
        # 1. time
        if not isinstance(snake_case , jnp.ndarray ):
            _UpperCAmelCase = jnp.array([timesteps] , dtype=jnp.intaa )
        elif isinstance(snake_case , jnp.ndarray ) and len(timesteps.shape ) == 0:
            _UpperCAmelCase = timesteps.astype(dtype=jnp.floataa )
            _UpperCAmelCase = jnp.expand_dims(snake_case , 0 )
        _UpperCAmelCase = self.time_proj(snake_case )
        _UpperCAmelCase = self.time_embedding(snake_case )
        # 2. pre-process
        _UpperCAmelCase = jnp.transpose(snake_case , (0, 2, 3, 1) )
        _UpperCAmelCase = self.conv_in(snake_case )
        # 3. down
        _UpperCAmelCase = (sample,)
        for down_block in self.down_blocks:
            if isinstance(snake_case , snake_case ):
                _UpperCAmelCase , _UpperCAmelCase = down_block(snake_case , snake_case , snake_case , deterministic=not train )
            else:
                _UpperCAmelCase , _UpperCAmelCase = down_block(snake_case , snake_case , deterministic=not train )
            down_block_res_samples += res_samples
        if down_block_additional_residuals is not None:
            # Add ControlNet-style residuals onto each stored skip connection.
            _UpperCAmelCase = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                snake_case , snake_case ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            _UpperCAmelCase = new_down_block_res_samples
        # 4. mid
        _UpperCAmelCase = self.mid_block(snake_case , snake_case , snake_case , deterministic=not train )
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual
        # 5. up
        for up_block in self.up_blocks:
            # Consume skip connections from the deepest level outwards.
            _UpperCAmelCase = down_block_res_samples[-(self.layers_per_block + 1) :]
            _UpperCAmelCase = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(snake_case , snake_case ):
                _UpperCAmelCase = up_block(
                    snake_case , temb=snake_case , encoder_hidden_states=snake_case , res_hidden_states_tuple=snake_case , deterministic=not train , )
            else:
                _UpperCAmelCase = up_block(snake_case , temb=snake_case , res_hidden_states_tuple=snake_case , deterministic=not train )
        # 6. post-process
        _UpperCAmelCase = self.conv_norm_out(snake_case )
        _UpperCAmelCase = nn.silu(snake_case )
        _UpperCAmelCase = self.conv_out(snake_case )
        _UpperCAmelCase = jnp.transpose(snake_case , (0, 3, 1, 2) )
        if not return_dict:
            return (sample,)
        return FlaxUNetaDConditionOutput(sample=snake_case )
| 24 |
"""simple docstring"""
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__ ( A ):
    """Config tester for Cvt: checks the config class exposes the attributes the model reads.

    NOTE(review): the constructed config was obfuscated into ``_UpperCAmelCase`` while
    the assertions referenced the original local; the name ``config`` is restored here.
    """

    def lowerCamelCase_ ( self ) -> int:
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , 'embed_dim' ) )
        self.parent.assertTrue(hasattr(config , 'num_heads' ) )
class CvtModelTester:
    """Builds small CvtConfig/model inputs and checks model outputs for the tests below.

    NOTE(review): attribute assignments and method names were clobbered by an automated
    rename; they are reconstructed from their use sites (``CvtModelTester(self)`` at the
    test-case setUp, ``self.model_tester.prepare_config_and_inputs()``,
    ``self.get_config()``, ``self.model_tester.depth``, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        # List defaults are shared across calls (mutable-default pitfall) but are only
        # ever read, never mutated, so the upstream convention is kept.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            # Classification labels in [0, num_labels).
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )

    def create_and_check_model(self , config , pixel_values , labels ):
        model = CvtModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        image_size = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            # Each stage downsamples spatially via its convolutional patch embedding.
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )

    def create_and_check_for_image_classification(self , config , pixel_values , labels ):
        # NOTE(review): obfuscation destroyed the target of the first assignment; the
        # upstream pattern sets the label count on the config before building the head.
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict


# Backward-compatible alias: the obfuscated file exposed this class as ``lowercase__``.
lowercase__ = CvtModelTester
@require_torch
class lowercase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common-suite tests for Cvt.

    NOTE(review): the base list was obfuscated to ``(A, A, unittest.TestCase)`` (a
    duplicate base raises TypeError); ``ModelTesterMixin``/``PipelineTesterMixin`` are
    restored from this file's imports.  Attribute and test names are reconstructed from
    their use sites and the standard transformers test layout.
    """

    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': CvtModel, 'image-classification': CvtForImageClassification}
        if is_torch_available()
        else {}
    )
    # Common-suite features Cvt does not support.
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self ):
        self.model_tester = CvtModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CvtConfig , has_text_modality=False , hidden_size=37 )

    def test_config(self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self ):
        # Intentionally empty: Cvt's config properties are checked elsewhere.
        return

    @unittest.skip(reason='Cvt does not output attentions' )
    def test_attention_outputs(self ):
        pass

    @unittest.skip(reason='Cvt does not use inputs_embeds' )
    def test_inputs_embeds(self ):
        pass

    @unittest.skip(reason='Cvt does not support input and output embeddings' )
    def test_model_common_attributes(self ):
        pass

    def test_forward_signature(self ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_hidden_states_output(self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states

            # One hidden state per Cvt stage.
            expected_num_layers = len(self.model_tester.depth )
            self.assertEqual(len(hidden_states ) , expected_num_layers )

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) ,
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )

            # check that output_hidden_states also work using config
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )

    def test_for_image_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_is_small(self ):
        pass

    @slow
    def test_model_from_pretrained(self ):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """Load the standard COCO cats fixture image used by the vision integration tests.

    NOTE(review): the obfuscated original assigned the image to a dead name and
    returned the undefined ``image``; the call site below uses ``prepare_img()``,
    so that name is restored here.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


# Backward-compatible alias for the obfuscated name this helper was exposed under.
UpperCAmelCase = prepare_img
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
    """Slow integration test: CvtForImageClassification on a real image matches reference logits.

    NOTE(review): locals were clobbered by an automated rename; ``default_image_processor``
    is restored from its use site inside the test below.
    """

    @cached_property
    def default_image_processor(self ):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

    @slow
    def test_inference_image_classification_head(self ):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 24 | 1 |
from typing import TYPE_CHECKING

from ....utils import _LazyModule


# Lazy-import structure: maps submodule name -> public names it provides.
# NOTE(review): the obfuscated original bound both this dict and the _LazyModule
# instance to dead names while the last line referenced ``_import_structure``;
# restored per the standard transformers lazy-module boilerplate.
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 344 |
from typing import TYPE_CHECKING

from ....utils import _LazyModule


# Lazy-import structure: maps submodule name -> public names it provides.
# NOTE(review): duplicate of the snippet above with the same obfuscation damage
# (dict and _LazyModule instance bound to dead names); restored identically.
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 344 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def lowerCamelCase_ ( snake_case_ ):
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def lowerCamelCase_ ( self ):
"""simple docstring"""
raise NotImplementedError() | 302 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = tempfile.mkdtemp()
# fmt: off
A_ : List[str] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
A_ : Union[str, Any] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
A_ : Dict = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
A_ : Optional[int] = {'unk_token': '<unk>'}
A_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case_ ) )
A_ : str = {
'do_resize': True,
'size': 2_0,
'do_center_crop': True,
'crop_size': 1_8,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
A_ : List[str] = os.path.join(self.tmpdirname , snake_case_ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(snake_case_ , snake_case_ )
def lowerCamelCase_ ( self , **snake_case_ ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCamelCase_ ( self , **snake_case_ ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCamelCase_ ( self , **snake_case_ ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
A_ : int = [Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Dict = self.get_tokenizer()
A_ : Optional[Any] = self.get_rust_tokenizer()
A_ : Any = self.get_image_processor()
A_ : Any = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor_slow.save_pretrained(self.tmpdirname )
A_ : List[str] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case_ )
A_ : List[Any] = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor_fast.save_pretrained(self.tmpdirname )
A_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , snake_case_ )
self.assertIsInstance(processor_fast.tokenizer , snake_case_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , snake_case_ )
self.assertIsInstance(processor_fast.image_processor , snake_case_ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
A_ : List[str] = self.get_image_processor(do_normalize=snake_case_ , padding_value=1.0 )
A_ : Dict = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = self.get_image_processor()
A_ : Dict = self.get_tokenizer()
A_ : Tuple = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
A_ : List[str] = self.prepare_image_inputs()
A_ : Dict = image_processor(snake_case_ , return_tensors='np' )
A_ : List[Any] = processor(images=snake_case_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : List[Any] = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
A_ : Tuple = 'lower newer'
A_ : List[str] = processor(text=snake_case_ )
A_ : Any = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Dict = self.get_image_processor()
A_ : Tuple = self.get_tokenizer()
A_ : List[str] = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
A_ : Optional[Any] = 'lower newer'
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Any = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def lowerCamelCase_(self):
    """Image + visual-prompt call returns pixel values for both image streams."""
    # BUG FIX: locals previously read names that were never assigned, and
    # pytest.raises() was given an undefined name instead of ValueError.
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()
    processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
    image_input = self.prepare_image_inputs()
    visual_prompt_input = self.prepare_image_inputs()
    inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
    self.assertListEqual(list(inputs.keys()), ['pixel_values', 'conditional_pixel_values'])
    # test if it raises when no input is passed
    with pytest.raises(ValueError):
        processor()
def lowerCamelCase_(self):
    """batch_decode on the processor must delegate to the tokenizer."""
    # BUG FIX: locals previously read names that were never assigned (NameError).
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()
    processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
    predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
    decoded_processor = processor.batch_decode(predicted_ids)
    decoded_tok = tokenizer.batch_decode(predicted_ids)
    self.assertListEqual(decoded_tok, decoded_processor)
from functools import lru_cache
def UpperCAmelCase_(_UpperCAmelCase: int) -> set:
    """Return the set of distinct prime factors of *_UpperCAmelCase* by trial division.

    BUG FIX: the body previously read `i`, `n` and `factors`, none of which were
    ever assigned (both initialisers were bound to a junk name), so the function
    raised NameError. The return annotation was also wrong (`str`).
    """
    n = _UpperCAmelCase
    i = 2
    factors = set()
    # Trial division: divide out each factor fully before advancing i.
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    # Whatever remains (> 1) is itself prime.
    if n > 1:
        factors.add(n)
    return factors
@lru_cache
def UpperCAmelCase_(_UpperCAmelCase: int) -> int:
    """Count the distinct prime factors of *_UpperCAmelCase* (memoised).

    BUG FIX: the body previously passed an undefined name instead of the
    function's own parameter.
    """
    # NOTE(review): `unique_prime_factors` is presumably the factoriser defined
    # above in this module — confirm the intended helper name.
    return len(unique_prime_factors(_UpperCAmelCase))
def UpperCAmelCase_(_UpperCAmelCase: list) -> bool:
    """True when every element of the list is equal (vacuously true when empty).

    BUG FIX: the body previously read an undefined name instead of the
    function's own parameter; the return annotation was also wrong.
    """
    return len(set(_UpperCAmelCase)) in (0, 1)
def UpperCAmelCase_(_UpperCAmelCase: int) -> list:
    """Return the first run of *_UpperCAmelCase* consecutive integers that each
    have exactly *_UpperCAmelCase* distinct prime factors (Project Euler 47).

    BUG FIX: the body previously read `base`, `group` and `checker`, none of
    which were ever assigned (NameError).
    """
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(_UpperCAmelCase)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(_UpperCAmelCase)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1
def UpperCAmelCase_(_UpperCAmelCase: int = 4) -> int:
    """First member of the earliest run of *n* consecutive integers with *n*
    distinct prime factors; returns None when the search yields nothing.

    BUG FIX: both `run(...)` argument and `len(...)` previously used undefined
    names instead of the parameter / result.
    """
    results = run(_UpperCAmelCase)
    return results[0] if len(results) else None
if __name__ == "__main__":
    # Script entry point: prints the Project Euler 47 answer for the default run length.
    # NOTE(review): `solution` is presumably the default-argument function above — confirm.
    print(solution())
| 188 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level logger.
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
# File names expected in a saved tokenizer directory.
# NOTE(review): the class below reads VOCAB_FILES_NAMES / PRETRAINED_* — these
# constants are presumably those names in the original file; confirm.
_SCREAMING_SNAKE_CASE : str = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# Hosted vocab/merges files for each pretrained Longformer checkpoint.
_SCREAMING_SNAKE_CASE : Any = {
    "vocab_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
    },
}
# Maximum input length (positional-embedding size) per checkpoint.
_SCREAMING_SNAKE_CASE : List[Any] = {
    "allenai/longformer-base-4096": 40_96,
    "allenai/longformer-large-4096": 40_96,
    "allenai/longformer-large-4096-finetuned-triviaqa": 40_96,
    "allenai/longformer-base-4096-extra.pos.embd.only": 40_96,
    "allenai/longformer-large-4096-extra.pos.embd.only": 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def UpperCAmelCase__():
    """Map every byte value (0-255) to a printable unicode character.

    Printable bytes map to themselves; the rest are shifted past 255 so no
    control characters appear in BPE vocab entries.

    BUG FIX: the body previously read `bs`, `cs` and `n`, none of which were
    ever assigned (all initialisers were bound to a junk name), and the
    statements filling the gaps were outside the `if`.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def UpperCAmelCase__(UpperCamelCase_):
    """Return the set of adjacent symbol pairs in a word (sequence of symbols).

    BUG FIX: the body previously read `pairs` and `prev_char`, which were never
    assigned (NameError).
    """
    pairs = set()
    prev_char = UpperCamelCase_[0]
    for char in UpperCamelCase_[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class A__(snake_case__):
    """Byte-level BPE tokenizer (GPT-2 style vocab.json + merges.txt).

    BUG FIXES throughout: several method signatures had duplicate parameter
    names (a SyntaxError), instance attributes (`self.encoder`,
    `self.bpe_ranks`, ...) were never assigned, and the merges sort key
    referenced an undefined lambda parameter.
    """

    # NOTE(review): all four class attributes share one obfuscated name;
    # presumably vocab_files_names / pretrained_vocab_files_map /
    # max_model_input_sizes / model_input_names originally — confirm.
    __magic_name__ = VOCAB_FILES_NAMES
    __magic_name__ = PRETRAINED_VOCAB_FILES_MAP
    __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __magic_name__ = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        """Load the vocabulary and merge ranks and register special tokens."""
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs)
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            # First line is a version header; last split element is empty.
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def a_(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def a_(self):
        """Full token->id vocabulary (base + added tokens)."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def a_(self, token):
        """Apply byte-pair merges to *token*; result symbols are space-joined."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (most frequent) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def a_(self, text):
        """Split *text* with the GPT-2 regex and BPE-encode each piece."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8'))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def a_(self, token):
        """Token string -> id, falling back to the unk token's id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def a_(self, index):
        """Id -> token string."""
        return self.decoder.get(index)

    def a_(self, tokens):
        """Join tokens and undo the byte->unicode mapping back to text."""
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text

    def a_(self, save_directory, filename_prefix=None):
        """Write vocab.json and merges.txt into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            # BUG FIX: the sort key previously referenced an undefined name.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file

    def a_(self, token_ids_0, token_ids_1=None):
        """Add <s> ... </s> (and pair separators) around the id sequences."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def a_(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """1 for special-token positions, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def a_(self, token_ids_0, token_ids_1=None):
        """All-zero token type ids (RoBERTa-style: no segment embeddings)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def a_(self, text, is_split_into_words=False, **kwargs):
        """Prepend a space when requested so the first word gets a ' '-prefixed BPE."""
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
| 550 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
# Pillow and the project's image loader are only needed when vision extras are installed.
if is_vision_available():
    from PIL import Image
    from ..image_utils import load_image
# Torch-only imports: model outputs and the zero-shot detection model mapping.
if is_torch_available():
    import torch
    from transformers.modeling_outputs import BaseModelOutput
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
# Module-level logger (obfuscated name; presumably `logger` originally — confirm).
UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase)
class lowerCamelCase__(UpperCAmelCase):
    """Zero-shot object detection pipeline: score free-text candidate labels
    against an image and return bounding boxes for the matches.

    BUG FIXES throughout: several signatures had duplicate parameter names
    (a SyntaxError), most locals were never assigned, `torch.intaa` is not a
    dtype, the sort lambda referenced an undefined name, and check_model_type
    was called with an undefined name.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.')
        requires_backends(self, 'vision')
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        """Accept a single image (path/PIL) plus labels, or a list of dicts."""
        if "text_queries" in kwargs:
            # Legacy argument name.
            candidate_labels = kwargs.pop('text_queries')
        if isinstance(image, (str, Image.Image)):
            inputs = {'image': image, 'candidate_labels': candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def UpperCAmelCase_(self, **kwargs):
        """Split pipeline kwargs; only postprocess takes parameters here."""
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['threshold'] = kwargs['threshold']
        if "top_k" in kwargs:
            postprocess_params['top_k'] = kwargs['top_k']
        return {}, {}, postprocess_params

    def UpperCAmelCase_(self, inputs):
        """Yield one model input per candidate label for a single image."""
        image = load_image(inputs['image'])
        candidate_labels = inputs['candidate_labels']
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(',')
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def UpperCAmelCase_(self, model_inputs):
        """Run the model, carrying chunk bookkeeping through to postprocess."""
        target_size = model_inputs.pop('target_size')
        candidate_label = model_inputs.pop('candidate_label')
        is_last = model_inputs.pop('is_last')
        outputs = self.model(**model_inputs)
        model_outputs = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
        return model_outputs

    def UpperCAmelCase_(self, model_outputs, threshold=0.1, top_k=None):
        """Collect per-label detections, sort by score, optionally truncate."""
        results = []
        for model_output in model_outputs:
            label = model_output['candidate_label']
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output['target_size'])[0]
            for index in outputs["scores"].nonzero():
                score = outputs['scores'][index].item()
                box = self._get_bounding_box(outputs['boxes'][index][0])
                result = {'score': score, 'label': label, 'box': box}
                results.append(result)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results

    def UpperCAmelCase_(self, box):
        """Convert a 4-element box tensor to an int xmin/ymin/xmax/ymax dict."""
        if self.framework != "pt":
            raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.')
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
        return bbox
| 144 |
from __future__ import annotations
def _a(nums, target) -> list:
    """Two-pointer search in a *sorted* list for two values summing to *target*.

    Returns the pair of indices, or [] when no such pair exists.

    BUG FIX: the original signature repeated the same parameter name twice
    (a SyntaxError) and the body read `i`/`j` which were never assigned.
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
    import doctest

    # Run the module's doctests, then demo the search.
    doctest.testmod()
    # NOTE(review): `two_pointer` is presumably the function defined above — confirm the name.
    print(f'''{two_pointer([2, 7, 1_1, 1_5], 9) = }''')
| 144 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __lowerCamelCase():
    """Build and parse the CLI arguments for the TPU launcher.

    BUG FIX: `type=` and `nargs=` previously referenced an undefined name; the
    correct values are `int`/`str` and `REMAINDER`. The `-> int` annotation was
    also wrong (argparse returns a Namespace).
    """
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
        ))
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script", type=str, help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ))
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def __lowerCamelCase():
    """Entry point: import the training script and spawn it across TPU cores.

    BUG FIXES: the `-> Dict` annotation referenced an undefined name (NameError
    at definition time) and the patched argv list was assigned to a junk name
    instead of `sys.argv`.
    """
    # NOTE(review): `parse_args` is presumably the CLI builder defined above — confirm.
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the target script sees its own args plus the TPU core count.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
    # CLI entry point.
    # NOTE(review): `main` is presumably the launcher function above — confirm the name.
    main()
| 269 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
# Module-level logger for this PyTorch->Flax conversion utility.
UpperCamelCase = logging.get_logger(__name__)
def __lowerCamelCase(__lowerCAmelCase):
    """Rewrite 'name.N' segments in a parameter key as 'name_N' (Flax naming).

    BUG FIX: the regex pattern was assigned to a junk name and the call
    effectively ran `re.findall(key, key)`.
    """
    key = __lowerCAmelCase
    pats = re.findall(r"\w+[.]\d+", key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def __lowerCamelCase(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Map a PyTorch parameter key/tensor to its Flax equivalent (rename + reshape).

    BUG FIX: the original signature repeated the same parameter name three
    times (a SyntaxError) and every renamed key/tensor was assigned to a junk
    name.
    """
    # Layer norm bias that should actually be a scale in the Flax tree.
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer: PyTorch OIHW -> Flax HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer: transpose the weight matrix
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def __lowerCamelCase(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict into a Flax parameter pytree.

    BUG FIX: the original signature repeated the same parameter name three
    times (a SyntaxError) and all intermediate results were assigned to junk
    names.
    """
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                    f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.')
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
| 269 | 1 |
from __future__ import annotations
def A(UpperCAmelCase):
    """Return True when every element of the sequence is distinct."""
    seen = set()
    for item in UpperCAmelCase:
        if item in seen:
            return False
        seen.add(item)
    return True
if __name__ == "__main__":
    import doctest

    # Run the module's doctests when executed as a script.
    doctest.testmod()
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def A(class_prompt, class_data_dir, num_class_images):
    """Download *num_class_images* regularisation images matching *class_prompt*
    from the LAION KNN service into *class_data_dir*.

    BUG FIX: the original signature repeated the same parameter name three
    times (a SyntaxError) and every local was assigned to a junk name.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1)
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    # Already downloaded enough images — nothing to do.
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return
    # Grow the query size until the service returns enough candidates.
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1)
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)
    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fa_urls, open(
        f"{class_data_dir}/images.txt", "w") as fa_paths:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Validate that the payload decodes as an image before keeping it.
                    Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fa_urls.write(images["url"] + "\n")
                    fa_paths.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download: skip any broken URL/image.
                continue
    return
def A():
    """Parse the CLI arguments for the retrieval script.

    BUG FIX: `add_help=`, `required=`, `type=` and `default=` previously
    referenced an undefined name instead of real values.
    """
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    # BUG FIX: the parsed namespace was assigned to a junk name while the call
    # below read `args` (NameError).
    # NOTE(review): `parse_args`/`retrieve` are presumably the functions defined
    # above in this module — confirm the intended names.
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def lowerCamelCase_(flax_key_tuple, flax_tensor):
    """Map a Flax key/tensor to the PyTorch convention (rename + permute).

    BUG FIX: the original signature repeated the same parameter name twice
    (a SyntaxError) and the renamed key/tensor were assigned to junk names.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer: (experts, in, out) -> (experts, out, in)
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer: transpose the weight matrix
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def lowerCamelCase_(layer, checkpoint_info, switch_checkpoint_path):
    """Split a flattened tensorstore layer path into (real layer name, sub-key
    tuple(s), content) for regrouping.

    BUG FIX: the original signature repeated the same parameter name three
    times (a SyntaxError) and every local was assigned to a junk name.
    """
    if "metadata" in layer:
        split_layer = layer.split('metadata')
        curr_real_layer_name = ''.join(split_layer[0])[:-1]
        split_layer = [tuple(('metadata' + split_layer[1]).split('/'))]
    elif "kvstore" in layer:
        split_layer = layer.split('kvstore')
        curr_real_layer_name = ''.join(split_layer[0])[:-1]
        split_layer = [tuple(('kvstore' + split_layer[1]).split('/'))]
    else:
        split_layer = layer.split('/')
        curr_real_layer_name = '/'.join(split_layer[:-1])
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        # Make the tensorstore path absolute relative to the checkpoint dir.
        content = f'{switch_checkpoint_path}/{checkpoint_info[layer]}'
    elif "kvstore/driver" in layer:
        content = 'file'
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def lowerCamelCase_(current_block, save_path):
    """Rename the keys of a shard via `rename_keys`, flip '/' separators to '.'
    and torch.save the result to *save_path*.

    BUG FIX: the original signature repeated the same parameter name twice
    (a SyntaxError), the loop dropped the key transformation, and the save
    call received undefined names.
    """
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def lowerCamelCase_(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name=WEIGHTS_NAME):
    """Stream a T5X Switch Transformers checkpoint from tensorstore and write it
    out as size-bounded PyTorch shards plus a weight-map index.

    Returns (metadata, index); when everything fits in one shard, returns
    ({weights_name: keys}, None).

    BUG FIX: the original signature repeated the same parameter name five
    times (a SyntaxError) and virtually every local was assigned to a junk
    name while the reads used the real names.
    """
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + '/checkpoint', 'rb') as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())['optimizer']['target']
        checkpoint_info = flatten_dict(checkpoint_info, sep='/')
    # Regroup the flat tensorstore spec entries per real layer name.
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path)
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
        # use the renaming pattern from the small conversion scripts
        key_tuple, raw_weights = rename_base_flax_keys(tuple(key.split('/')), raw_weights)
        key = '/'.join(key_tuple)
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace('.bin', f'-{len(sharded_state_dicts)+1:05d}-of-???.bin'))
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace('.bin', f'-{len(sharded_state_dicts)+1:05d}-of-???.bin'))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())
    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            '.bin', f'-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin')  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path, weights_name.replace('.bin', f'-{idx+1:05d}-of-???.bin'))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'total_size': total_size}
    index = {'metadata': metadata, 'weight_map': weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), 'w', encoding='utf-8') as f:
        content = json.dumps(index, indent=2, sort_keys=True) + '\n'
        f.write(content)
    return metadata, index
if __name__ == "__main__":
    # BUG FIXES: the parser/namespace were assigned to junk names, and the call
    # read `args.switch_tax_checkpoint_path` — argparse derives the attribute
    # `switch_t5x_checkpoint_path` from the flag below.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--switch_t5x_checkpoint_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
        type=str,
        required=False,
        help='Path to a directory containing a folder per layer. Follows the original Google format.',
    )
    parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
    parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
        type=str,
        required=False,
        help='Path to the output pytorch model.',
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def lowerCamelCase_():
    """Smoke test: load a converted switch-base-8 checkpoint and generate once.

    BUG FIXES: locals were assigned to junk names while reads used the real
    names, and the import referenced non-existent `TaTokenizer` (a mangled
    `T5Tokenizer`).
    """
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained('google/switch-base-8')
    config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted')
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        '/home/arthur_huggingface_co/transformers/switch_converted', device_map='auto')
    tokenizer = T5Tokenizer.from_pretrained('t5-small')
    text = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
    input_ids = tokenizer(text, return_tensors='pt').input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase__ :str = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Optional[int] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Dict = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
lowercase__ :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 522 | 1 |
"""simple docstring"""
def greatest_common_divisor(a: int, b: int) -> int:
    """Return the greatest common divisor of *a* and *b* (recursive Euclid).

    The result is always non-negative; gcd(0, 0) == 0 by convention.
    """
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)
def gcd_by_iterative(x: int, y: int) -> int:
    """Return the greatest common divisor of *x* and *y* (iterative Euclid).

    The result is always non-negative; gcd_by_iterative(0, 0) == 0.
    """
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)
def main() -> None:
    """Interactive demo: read two comma-separated integers and print both GCDs.

    Prints "Wrong input" on malformed input instead of raising.
    """
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f'greatest_common_divisor({num_1}, {num_2}) = '
            f'{greatest_common_divisor(num_1, num_2)}')
        print(f'By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}')
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
# Entry point: run the interactive GCD demo when executed as a script.
if __name__ == "__main__":
    main()
| 117 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# NOTE(review): both assignments bind the same name, so only 32 survives and
# neither value is referenced in this excerpt. In the upstream accelerate
# example these are MAX_GPU_BATCH_SIZE = 16 and EVAL_BATCH_SIZE = 32 —
# TODO confirm the intended names.
__snake_case = 16
__snake_case = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build train/eval dataloaders for GLUE MRPC with a BERT tokenizer.

    Args:
        accelerator: the `Accelerator` coordinating (distributed) execution;
            used to serialize dataset preprocessing and pick padding rules.
        batch_size: per-device batch size for both dataloaders.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
# For testing only: when the harness sets TESTING_MOCKED_DATALOADERS=1,
# replace the real GLUE dataloaders with lightweight mocks so CI never
# touches the network. The rebinding intentionally shadows the function
# defined above, hence the noqa for the redefinition warning.
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Train BERT on MRPC with Accelerate + LocalSGD parameter synchronization.

    Args:
        config: dict with "lr", "num_epochs", "seed" and "batch_size".
        args: parsed CLI namespace (mixed_precision, cpu,
            gradient_accumulation_steps, local_sgd_steps).
    """
    # For testing only: shrink the run when the mocked-dataloader harness is active.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                # LocalSGD-specific line
                local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)
def main():
    """Parse CLI arguments, assemble the training config, and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD")
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
# Standard script entry point.
if __name__ == "__main__":
    main()
| 117 | 1 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
# Module-level logger; the pipeline classes below log through this name.
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot text classification by turning each
    (sequence, candidate label) combination into an NLI premise/hypothesis pair.
    """

    def _parse_labels(self, labels):
        # Accept either a list of labels or a single comma-separated string.
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(',') if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError('You must include at least one label and at least one sequence.')
        # A template without a "{}" slot would silently ignore the labels.
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    'Make sure the passed template includes formatting syntax such as {{}} where the label should go.'
                ).format(hypothesis_template))

        if isinstance(sequences, str):
            sequences = [sequences]

        # One premise/hypothesis pair per (sequence, label) combination.
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    """
    NLI-based zero-shot classification pipeline: each candidate label is turned
    into a hypothesis and scored against the input sequence with an NLI model.
    """

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                'Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '
                '-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.')

    @property
    def entailment_id(self):
        # Locate the model's "entailment" class id; -1 when it cannot be found.
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith('entail'):
                return ind
        return -1

    def _parse_and_tokenize(self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs):
        """Tokenize the premise/hypothesis pairs, padding/truncating as needed."""
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                'Tokenizer was not supporting padding necessary for zero-shot, attempting to use '
                ' `pad_token=eos_token`')
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=truncation, )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=TruncationStrategy.DO_NOT_TRUNCATE, )
            else:
                raise e
        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get('multi_class', None) is not None:
            kwargs['multi_label'] = kwargs['multi_class']
            logger.warning(
                'The `multi_class` argument has been deprecated and renamed to `multi_label`. '
                '`multi_class` will be removed in a future version of Transformers.')
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = self._args_parser._parse_labels(kwargs['candidate_labels'])
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params['multi_label'] = kwargs['multi_label']
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            # Allow candidate labels as the single positional extra argument.
            kwargs['candidate_labels'] = args[0]
        else:
            raise ValueError(F'Unable to understand extra arguments {args}')
        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        # One chunk per candidate label; `is_last` marks the final chunk.
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs['candidate_label']
        sequence = inputs['sequence']
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            'candidate_label': candidate_label,
            'sequence': sequence,
            'is_last': inputs['is_last'],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs['candidate_label'] for outputs in model_outputs]
        sequences = [outputs['sequence'] for outputs in model_outputs]
        logits = np.concatenate([output['logits'].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
'''simple docstring'''
from __future__ import annotations
from collections import deque
class Automaton:
    """Aho-Corasick automaton for simultaneous multi-keyword substring search.

    States are stored in `self.adlist`: each entry holds the transition
    character (`value`), child state indices (`next_states`), the failure
    link (`fail_state`) and the keywords ending at that state (`output`).
    """

    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        # State 0 is the root.
        self.adlist.append(
            {'value': '', 'next_states': [], 'fail_state': 0, 'output': []})

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        """Return the child of `current_state` reached on `char`, or None."""
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        """Insert `keyword` into the trie, creating states as needed."""
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        'value': character,
                        'next_states': [],
                        'fail_state': 0,
                        'output': [],
                    })
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        """BFS over the trie to compute failure links and merge outputs."""
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                # Walk failure links until a state with a matching edge (or root).
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"])
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                # A match at `child` also implies every match at its fail state.
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """Return a dict mapping each found keyword to its start indices in `string`."""
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
# Run the module's doctests when this file is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A binary-tree node holding an integer value and two child links."""

    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class BinaryTreeNodeSum:
    """Iterable wrapper that yields the sum of all node values in a binary tree."""

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        """Recursively sum `node` and its entire subtree (0 for an empty tree)."""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
# Run the module's doctests when this file is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 707 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    """Holds synthetic input shapes and generates kwargs for `BeitImageProcessor` tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ) -> None:
        # Fall back to the default resize/crop geometry when not provided.
        size = size if size is not None else {'height': 20, 'width': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    """Load one (image, segmentation map) pair from the ADE20k test fixtures."""
    dataset = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')

    image = Image.open(dataset[0]['file'])
    map = Image.open(dataset[1]['file'])

    return image, map
def prepare_semantic_batch_inputs():
    """Load two (image, segmentation map) pairs from the ADE20k test fixtures."""
    ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')

    image1 = Image.open(ds[0]['file'])
    map1 = Image.open(ds[1]['file'])
    image2 = Image.open(ds[2]['file'])
    map2 = Image.open(ds[3]['file'])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for `BeitImageProcessor`: PIL/NumPy/PyTorch inputs and segmentation maps."""

    # The processor under test; None when vision deps are missing so the
    # `require_vision` decorator skips cleanly.
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        # Shared generator of processor kwargs and synthetic input shapes.
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'center_crop'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 20, 'width': 20})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        # Keyword overrides must win over the serialized dict.
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        # Intentionally empty in this excerpt — presumably covered by the
        # common mixin. NOTE(review): confirm the intended method name.
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors='pt')
        self.assertEqual(
            encoding['pixel_values'].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(
            encoding['labels'].shape,
            (
                1,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(encoding['labels'].dtype, torch.long)
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors='pt')
        self.assertEqual(
            encoding['pixel_values'].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(
            encoding['labels'].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(encoding['labels'].dtype, torch.long)
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors='pt')
        self.assertEqual(
            encoding['pixel_values'].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(
            encoding['labels'].shape,
            (
                1,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(encoding['labels'].dtype, torch.long)
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors='pt')
        self.assertEqual(
            encoding['pixel_values'].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(
            encoding['labels'].shape,
            (
                2,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(encoding['labels'].dtype, torch.long)
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)

    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors='pt')
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 150)

        # With reduce_labels the background class is remapped to 255.
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors='pt')
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)
| 380 | 0 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def lowerCamelCase( ):
    """Parse command-line options for the CodeComplex fine-tuning run.

    Returns:
        argparse.Namespace: model/training hyper-parameters. Every option has
        a default, so the script can run with no CLI flags at all.
    """
    # NOTE(review): the obfuscated dump used the undefined name ``a__`` as the
    # ``type=`` of every option and lost the ``parser`` binding; restored to
    # the intended builtin types.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_ckpt', type=str, default='microsoft/unixcoder-base-nine')
    parser.add_argument('--num_epochs', type=int, default=5)
    parser.add_argument('--batch_size', type=int, default=6)
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1)
    parser.add_argument('--freeze', type=bool, default=True)
    parser.add_argument('--learning_rate', type=float, default=5e-4)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--lr_scheduler_type', type=str, default='cosine')
    parser.add_argument('--num_warmup_steps', type=int, default=10)
    parser.add_argument('--weight_decay', type=float, default=0.01)
    parser.add_argument('--output_dir', type=str, default='./results')
    return parser.parse_args()
snake_case_ : int = load('''accuracy''')
def lowerCamelCase( a__):
    """Compute accuracy for a ``(logits, labels)`` eval-prediction pair.

    Args:
        a__: tuple of (logits array, reference label array) as produced by
            the HF ``Trainer`` evaluation loop.

    Returns:
        dict with the ``accuracy`` metric.
    """
    # The dump passed the whole tuple to np.argmax and reused one name for
    # both predictions and references; restored the intended unpacking.
    predictions, references = a__
    predictions = np.argmax(predictions, axis=1)
    # ``metric`` is the module-level evaluate.load("accuracy") instance.
    return metric.compute(predictions=predictions, references=references)
class A__ ( UpperCamelCase__ ):
    """Trainer callback that additionally evaluates on the *training* set.

    Whenever an evaluation is scheduled, this callback runs a second
    evaluation pass over ``train_dataset`` (metrics logged under the
    ``train`` prefix) so train/eval metrics can be compared per epoch.
    """

    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    # NOTE(review): restored from the obfuscated dump — the hook must be
    # named ``on_evaluate`` for the Trainer to invoke it, and the four
    # parameters had been collapsed into one duplicated name (a SyntaxError).
    def on_evaluate(self, args, state, control, **kwargs):
        if control.should_evaluate:
            # Copy ``control`` so the extra evaluation pass cannot clobber
            # the flags of the primary evaluation.
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix='train')
            return control_copy
def lowerCamelCase( ):
    """Fine-tune a 7-way code-complexity classifier on codeparrot/codecomplex.

    Restores the intended local variable names from the obfuscated dump, in
    which every binding was collapsed to ``_SCREAMING_SNAKE_CASE`` (breaking
    all later reads) and ``ClassLabel.str2int`` was mangled to the
    nonexistent ``straint``.
    """
    # NOTE(review): ``get_args`` / ``compute_metrics`` are the sibling
    # helpers above, which this dump also renamed to ``lowerCamelCase``.
    args = get_args()
    set_seed(args.seed)

    # 80/10/10 train/valid/test split of the single "train" split.
    dataset = load_dataset('codeparrot/codecomplex', split='train')
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test['test'].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            'train': train_test['train'],
            'test': test_validation['train'],
            'valid': test_validation['test'],
        })

    print('Loading tokenizer and model')
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token  # model has no pad token; reuse EOS
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        # Train only the classification head.
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation['train']['complexity'])))

    def tokenize(example):
        """Tokenize source code and map the complexity string to a class id."""
        inputs = tokenizer(example['src'], truncation=True, max_length=1024)
        label = labels.str2int(example['complexity'])  # was ``labels.straint`` (typo) in the dump
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize, batched=True, remove_columns=train_test_validation['train'].column_names)
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy='epoch', save_strategy='epoch', logging_strategy='epoch', per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model='accuracy', run_name='complexity-java', report_to='wandb')

    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets['train'], eval_dataset=tokenized_datasets['valid'], tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics)

    print('Training...')
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main() | 691 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
snake_case_ : Optional[int] = '''sshleifer/mar_enro_6_3_student'''
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
super().setUp()
_SCREAMING_SNAKE_CASE =cached_path(
'''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=_a , )
_SCREAMING_SNAKE_CASE =f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
MarianMTModel.from_pretrained(_a )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_SCREAMING_SNAKE_CASE =f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_SCREAMING_SNAKE_CASE =['''finetune.py'''] + bash_script.split() + args
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationModule.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
_SCREAMING_SNAKE_CASE =main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class A__ ( UpperCamelCase__ ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =f"{self.test_file_dir_str}/test_data/wmt_en_ro"
_SCREAMING_SNAKE_CASE ={
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16''' , '''''' )
_SCREAMING_SNAKE_CASE =6
_SCREAMING_SNAKE_CASE =(
['''distillation.py''']
+ bash_script.split()
+ [
f"--output_dir={output_dir}",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"--num_train_epochs={epochs}",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationDistiller.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_SCREAMING_SNAKE_CASE =distill_main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1 | 691 | 1 |
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
_A = logging.getLogger(__name__)
class _lowerCamelCase ( a_ ):
    """BertEncoder variant used by PABEE early-exit models.

    Exposes ``adaptive_forward`` so the caller can run one encoder layer at a
    time and decide after each layer whether to exit early.
    """

    # NOTE(review): restored from the obfuscated dump, whose method had all
    # four parameter names collapsed into one (a SyntaxError) and referenced
    # undefined locals. The sibling model calls
    # ``self.encoder.adaptive_forward(...)``, fixing the method name.
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        """Run only encoder layer ``current_layer`` and return its hidden states."""
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , a_ , )
class _lowerCamelCase ( a_ ):
def __init__( self : str , UpperCamelCase : int ) -> Dict:
"""simple docstring"""
super().__init__(UpperCamelCase )
lowerCAmelCase__ : List[Any] = BertEncoderWithPabee(UpperCamelCase )
self.init_weights()
lowerCAmelCase__ : Dict = 0
lowerCAmelCase__ : int = 0
lowerCAmelCase__ : Any = 0
lowerCAmelCase__ : Any = 0
def _lowerCAmelCase ( self : Dict , UpperCamelCase : Any ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = threshold
def _lowerCAmelCase ( self : int , UpperCamelCase : int ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = patience
def _lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = 0
lowerCAmelCase__ : Dict = 0
def _lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = self.inference_layers_num / self.inference_instances_num
lowerCAmelCase__ : Optional[Any] = (
f"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
f""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(UpperCamelCase )
@add_start_docstrings_to_model_forward(UpperCamelCase )
def _lowerCAmelCase ( self : Dict , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : int=None , UpperCamelCase : List[str]=None , UpperCamelCase : List[str]=None , UpperCamelCase : int=None , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : List[str]=None , UpperCamelCase : str=None , UpperCamelCase : List[str]=False , ) -> List[Any]:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
lowerCAmelCase__ : Dict = input_ids.size()
elif inputs_embeds is not None:
lowerCAmelCase__ : str = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
lowerCAmelCase__ : Dict = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowerCAmelCase__ : Any = torch.ones(UpperCamelCase , device=UpperCamelCase )
if token_type_ids is None:
lowerCAmelCase__ : List[str] = torch.zeros(UpperCamelCase , dtype=torch.long , device=UpperCamelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowerCAmelCase__ : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = encoder_hidden_states.size()
lowerCAmelCase__ : Dict = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowerCAmelCase__ : Dict = torch.ones(UpperCamelCase , device=UpperCamelCase )
lowerCAmelCase__ : List[str] = self.invert_attention_mask(UpperCamelCase )
else:
lowerCAmelCase__ : Optional[int] = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowerCAmelCase__ : int = self.get_head_mask(UpperCamelCase , self.config.num_hidden_layers )
lowerCAmelCase__ : List[Any] = self.embeddings(
input_ids=UpperCamelCase , position_ids=UpperCamelCase , token_type_ids=UpperCamelCase , inputs_embeds=UpperCamelCase )
lowerCAmelCase__ : int = embedding_output
if self.training:
lowerCAmelCase__ : str = []
for i in range(self.config.num_hidden_layers ):
lowerCAmelCase__ : List[Any] = self.encoder.adaptive_forward(
UpperCamelCase , current_layer=UpperCamelCase , attention_mask=UpperCamelCase , head_mask=UpperCamelCase )
lowerCAmelCase__ : Optional[int] = self.pooler(UpperCamelCase )
lowerCAmelCase__ : Optional[int] = output_layers[i](output_dropout(UpperCamelCase ) )
res.append(UpperCamelCase )
elif self.patience == 0: # Use all layers for inference
lowerCAmelCase__ : str = self.encoder(
UpperCamelCase , attention_mask=UpperCamelCase , head_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , )
lowerCAmelCase__ : Any = self.pooler(encoder_outputs[0] )
lowerCAmelCase__ : Optional[Any] = [output_layers[self.config.num_hidden_layers - 1](UpperCamelCase )]
else:
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : List[Any] = None
lowerCAmelCase__ : List[str] = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
lowerCAmelCase__ : str = self.encoder.adaptive_forward(
UpperCamelCase , current_layer=UpperCamelCase , attention_mask=UpperCamelCase , head_mask=UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = self.pooler(UpperCamelCase )
lowerCAmelCase__ : Any = output_layers[i](UpperCamelCase )
if regression:
lowerCAmelCase__ : str = logits.detach()
if patient_result is not None:
lowerCAmelCase__ : Optional[Any] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
lowerCAmelCase__ : List[str] = 0
else:
lowerCAmelCase__ : Tuple = logits.detach().argmax(dim=1 )
if patient_result is not None:
lowerCAmelCase__ : List[str] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(UpperCamelCase ) ):
patient_counter += 1
else:
lowerCAmelCase__ : Optional[int] = 0
lowerCAmelCase__ : Optional[int] = logits
if patient_counter == self.patience:
break
lowerCAmelCase__ : List[str] = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , a_ , )
class _lowerCamelCase ( a_ ):
def __init__( self : Optional[Any] , UpperCamelCase : Optional[int] ) -> Dict:
"""simple docstring"""
super().__init__(UpperCamelCase )
lowerCAmelCase__ : List[str] = config.num_labels
lowerCAmelCase__ : Tuple = BertModelWithPabee(UpperCamelCase )
lowerCAmelCase__ : str = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase__ : Optional[Any] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCamelCase )
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Tuple=None , UpperCamelCase : int=None , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Dict=None , UpperCamelCase : Optional[Any]=None , ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.bert(
input_ids=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , position_ids=UpperCamelCase , head_mask=UpperCamelCase , inputs_embeds=UpperCamelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
lowerCAmelCase__ : Any = (logits[-1],)
if labels is not None:
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : Optional[int] = 0
for ix, logits_item in enumerate(UpperCamelCase ):
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase__ : str = MSELoss()
lowerCAmelCase__ : int = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase__ : Union[str, Any] = CrossEntropyLoss()
lowerCAmelCase__ : Dict = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
lowerCAmelCase__ : Dict = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
lowerCAmelCase__ : List[Any] = (total_loss / total_weights,) + outputs
return outputs
| 507 |
"""simple docstring"""
_A = tuple[float, float, float]
_A = tuple[float, float, float]
def lowercase_ ( end_pointa , end_pointb ) -> tuple[float, float, float]:
    """Return the 3-D vector pointing from ``end_pointa`` to ``end_pointb``.

    The obfuscated dump duplicated both parameter names (a SyntaxError) and
    subtracted a point from itself; restored ``b - a`` per component. The
    undefined ``Vectorad`` annotation is replaced with a concrete tuple type.
    """
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)
def lowercase_ ( ab , ac ) -> tuple[float, float, float]:
    """Return the 3-D cross product ``ab x ac``.

    The obfuscated dump duplicated both parameter names (a SyntaxError);
    restored the two distinct vectors. Uses the cofactor expansion:
    i*(ab1*ac2 - ab2*ac1) - j*(ab0*ac2 - ab2*ac0) + k*(ab0*ac1 - ab1*ac0).
    """
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def lowercase_ ( vector , accuracy ) -> bool:
    """True when every component of ``vector`` rounds to zero at ``accuracy`` decimals.

    The obfuscated dump duplicated the parameter names (a SyntaxError) and
    rounded the whole vector instead of each component ``x``; restored.
    """
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)
def lowercase_ ( pointa , pointb , pointc , accuracy = 10 ) -> bool:
    """True when three 3-D points are collinear.

    Builds the vectors a->b and a->c and checks that their cross product is
    the zero vector (to ``accuracy`` decimals). The obfuscated dump collapsed
    all four parameter names into one (a SyntaxError); restored. The helpers
    ``create_vector`` / ``get_ad_vectors_cross`` / ``is_zero_vector`` are the
    sibling functions above (renamed ``lowercase_`` by the obfuscation).
    """
    ab = create_vector(pointa , pointb )
    ac = create_vector(pointa , pointc )
    return is_zero_vector(get_ad_vectors_cross(ab , ac ) , accuracy )
| 507 | 1 |
'''simple docstring'''
def __a ( _UpperCamelCase: list ) -> float:
"""simple docstring"""
_snake_case = 0
while len(_UpperCamelCase ) > 1:
_snake_case = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
_snake_case = files.index(min(_UpperCamelCase ) )
temp += files[min_index]
files.pop(_UpperCamelCase )
files.append(_UpperCamelCase )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
| 185 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
UpperCamelCase_ : Optional[int] = object()
# For specifying empty leaf dict `{}`
UpperCamelCase_ : Optional[int] = object()
def __a ( _UpperCamelCase: Union[str, Any] , _UpperCamelCase: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
_snake_case = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(_UpperCamelCase ) - len(_UpperCamelCase ) + 1 ):
_snake_case = [x.match(_UpperCamelCase ) for x, y in zip(_UpperCamelCase , ks[i:] )]
if matches and all(_UpperCamelCase ):
return True
return False
def __a ( _UpperCamelCase: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
def replace(_UpperCamelCase: Tuple , _UpperCamelCase: List[str] ):
for rule, replacement in rules:
if _match(_UpperCamelCase , _UpperCamelCase ):
return replacement
return val
return replace
def __a ( ) -> Any:
    """Partition rules mapping GPT-style parameter paths to ``PartitionSpec``s.

    ``None`` means "replicate"; ``P("mp", None)`` / ``P(None, "mp")`` shard
    one axis across the model-parallel mesh dimension. The obfuscated dump
    passed the undefined module-level name ``_UpperCamelCase`` where the
    original passed ``None``; restored.
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def __a ( in_dict ) -> Any:
    """Assign a ``PartitionSpec`` to every leaf of a flax parameter pytree.

    Flattens ``in_dict``, maps each flat key through the partition rules, and
    asserts that no leaf was left with the ``_unmatched`` sentinel. The
    obfuscated dump lost the ``rules``/``replace``/``initd``/``result``
    bindings (all collapsed to one name); restored.

    Returns:
        A frozen pytree with the same structure whose leaves are partition specs.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    # Every parameter path must be covered by some rule.
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
| 185 | 1 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
UpperCamelCase = logging.getLogger(__name__)
class _a ( lowercase_ ):
'''simple docstring'''
UpperCamelCase__ = """sequence-classification"""
def __init__( self , UpperCAmelCase_) -> Union[str, Any]:
'''simple docstring'''
if type(UpperCAmelCase_) == dict:
lowercase__: Optional[Any] = Namespace(**UpperCAmelCase_)
lowercase__: Optional[int] = glue_output_modes[hparams.task]
lowercase__: Dict = glue_tasks_num_labels[hparams.task]
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ , self.mode)
def __lowercase ( self , **UpperCAmelCase_) -> List[Any]:
'''simple docstring'''
return self.model(**UpperCAmelCase_)
def __lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_) -> Any:
'''simple docstring'''
lowercase__: Optional[Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowercase__: Any = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
lowercase__: List[str] = self(**UpperCAmelCase_)
lowercase__: List[str] = outputs[0]
lowercase__: Dict = self.trainer.lr_schedulers[0]["scheduler"]
lowercase__: Tuple = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def __lowercase ( self) -> Dict:
'''simple docstring'''
lowercase__: Union[str, Any] = self.hparams
lowercase__: Tuple = processors[args.task]()
lowercase__: int = processor.get_labels()
for mode in ["train", "dev"]:
lowercase__: Optional[int] = self._feature_file(UpperCAmelCase_)
if os.path.exists(UpperCAmelCase_) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , UpperCAmelCase_)
else:
logger.info("Creating features from dataset file at %s" , args.data_dir)
lowercase__: Optional[Any] = (
processor.get_dev_examples(args.data_dir)
if mode == "dev"
else processor.get_train_examples(args.data_dir)
)
lowercase__: int = convert_examples_to_features(
UpperCAmelCase_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , UpperCAmelCase_)
torch.save(UpperCAmelCase_ , UpperCAmelCase_)
def __lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = False) -> DataLoader:
'''simple docstring'''
lowercase__: int = "dev" if mode == "test" else mode
lowercase__: Tuple = self._feature_file(UpperCAmelCase_)
logger.info("Loading features from cached file %s" , UpperCAmelCase_)
lowercase__: Tuple = torch.load(UpperCAmelCase_)
lowercase__: Optional[Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
lowercase__: Dict = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
lowercase__: List[Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
if self.hparams.glue_output_mode == "classification":
lowercase__: int = torch.tensor([f.label for f in features] , dtype=torch.long)
elif self.hparams.glue_output_mode == "regression":
lowercase__: Tuple = torch.tensor([f.label for f in features] , dtype=torch.float)
return DataLoader(
TensorDataset(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) , batch_size=UpperCAmelCase_ , shuffle=UpperCAmelCase_ , )
def __lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_) -> List[Any]:
'''simple docstring'''
lowercase__: Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowercase__: int = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
lowercase__: Union[str, Any] = self(**UpperCAmelCase_)
lowercase__ , lowercase__: Dict = outputs[:2]
lowercase__: Any = logits.detach().cpu().numpy()
lowercase__: str = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __lowercase ( self , UpperCAmelCase_) -> tuple:
'''simple docstring'''
lowercase__: Union[str, Any] = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
lowercase__: Dict = np.concatenate([x["pred"] for x in outputs] , axis=0)
if self.hparams.glue_output_mode == "classification":
lowercase__: Union[str, Any] = np.argmax(UpperCAmelCase_ , axis=1)
elif self.hparams.glue_output_mode == "regression":
lowercase__: Dict = np.squeeze(UpperCAmelCase_)
lowercase__: Tuple = np.concatenate([x["target"] for x in outputs] , axis=0)
lowercase__: int = [[] for _ in range(out_label_ids.shape[0])]
lowercase__: Tuple = [[] for _ in range(out_label_ids.shape[0])]
lowercase__: str = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , UpperCAmelCase_ , UpperCAmelCase_)}
lowercase__: List[str] = dict(results.items())
lowercase__: Any = results
return ret, preds_list, out_label_list
def __lowercase ( self , UpperCAmelCase_) -> dict:
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__: List[str] = self._eval_end(UpperCAmelCase_)
lowercase__: str = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __lowercase ( self , UpperCAmelCase_) -> dict:
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__: Tuple = self._eval_end(UpperCAmelCase_)
lowercase__: Dict = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __lowercase ( UpperCAmelCase_ , UpperCAmelCase_) -> Optional[Any]:
'''simple docstring'''
BaseTransformer.add_model_specific_args(UpperCAmelCase_ , UpperCAmelCase_)
parser.add_argument(
"--max_seq_length" , default=128 , type=UpperCAmelCase_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=UpperCAmelCase_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets")
return parser
def A( ):
    """CLI entry point: parse args, train a GLUE transformer, optionally predict.

    Restores the intended bindings from the obfuscated dump, where
    ``parser``/``args``/``model``/``trainer``/``checkpoints`` had all been
    collapsed to one name, breaking every later read.

    Returns:
        The trainer's test output when ``--do_predict`` is set, else None.
    """
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    # NOTE(review): ``GLUETransformer`` is the sibling class above (renamed
    # ``_a`` by the obfuscation in this dump).
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a timestamped folder is generated in ./results
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results", F"""{args.task}_{time.strftime('%Y%m%d_%H%M%S')}""")
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set using the newest checkpoint.
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
| 120 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class _a :
    """Builds a tiny ``LlamaConfig`` plus random input tensors and runs shape
    checks on the various Llama heads (base model, decoder, causal LM).

    NOTE(review): the obfuscated names here are broken in two independent
    ways — every method shares the name ``__lowercase`` (each definition
    shadows the previous one on the class), and several signatures repeat the
    parameter name ``UpperCAmelCase_``, which is a SyntaxError in Python.
    Code is kept byte-identical; only documentation was added.
    """

    def __init__( self , UpperCAmelCase_ , UpperCAmelCase_=13 , UpperCAmelCase_=7 , UpperCAmelCase_=True , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=99 , UpperCAmelCase_=32 , UpperCAmelCase_=5 , UpperCAmelCase_=4 , UpperCAmelCase_=37 , UpperCAmelCase_="gelu" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=512 , UpperCAmelCase_=16 , UpperCAmelCase_=2 , UpperCAmelCase_=0.02 , UpperCAmelCase_=3 , UpperCAmelCase_=4 , UpperCAmelCase_=None , ) -> Optional[Any]:
        """Store the tiny-model hyper-parameters used by all checks below.

        NOTE(review): the right-hand names (``parent``, ``batch_size``, ...)
        are not bound anywhere in this scope — presumably they were the
        original parameter names before obfuscation; confirm against the
        upstream test file.
        """
        lowercase__: Any = parent
        lowercase__: List[str] = batch_size
        lowercase__: Dict = seq_length
        lowercase__: Dict = is_training
        lowercase__: List[str] = use_input_mask
        lowercase__: Dict = use_token_type_ids
        lowercase__: Optional[Any] = use_labels
        lowercase__: str = vocab_size
        lowercase__: Optional[int] = hidden_size
        lowercase__: List[Any] = num_hidden_layers
        lowercase__: Tuple = num_attention_heads
        lowercase__: Optional[Any] = intermediate_size
        lowercase__: Any = hidden_act
        lowercase__: Optional[int] = hidden_dropout_prob
        lowercase__: Optional[int] = attention_probs_dropout_prob
        lowercase__: Dict = max_position_embeddings
        lowercase__: Dict = type_vocab_size
        lowercase__: Dict = type_sequence_label_size
        lowercase__: List[str] = initializer_range
        lowercase__: Tuple = num_labels
        lowercase__: int = num_choices
        lowercase__: Optional[int] = scope

    def __lowercase ( self) -> List[Any]:
        """Create a config plus random ids/masks/labels for one forward pass."""
        lowercase__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        lowercase__: Union[str, Any] = None
        if self.use_input_mask:
            lowercase__: Tuple = random_attention_mask([self.batch_size, self.seq_length])
        lowercase__: int = None
        if self.use_token_type_ids:
            lowercase__: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        lowercase__: Union[str, Any] = None
        lowercase__: List[Any] = None
        lowercase__: Tuple = None
        if self.use_labels:
            lowercase__: Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            lowercase__: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            lowercase__: Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices)
        lowercase__: int = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def __lowercase ( self) -> str:
        """Return a ``LlamaConfig`` built from the stored hyper-parameters."""
        return LlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , )

    def __lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) -> Union[str, Any]:
        """Run the base ``LlamaModel`` and assert the hidden-state shape."""
        lowercase__: List[str] = LlamaModel(config=UpperCAmelCase_)
        model.to(UpperCAmelCase_)
        model.eval()
        lowercase__: Optional[int] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_)
        lowercase__: int = model(UpperCAmelCase_)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))

    def __lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) -> Tuple:
        """Run ``LlamaModel`` as a decoder (with and without encoder states)
        and assert the hidden-state shape."""
        lowercase__: Tuple = True
        lowercase__: Union[str, Any] = LlamaModel(UpperCAmelCase_)
        model.to(UpperCAmelCase_)
        model.eval()
        lowercase__: Any = model(
            UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=UpperCAmelCase_ , )
        lowercase__: Union[str, Any] = model(
            UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , )
        lowercase__: List[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))

    def __lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) -> Tuple:
        """Run ``LlamaForCausalLM`` with labels and assert the logits shape."""
        lowercase__: Tuple = LlamaForCausalLM(config=UpperCAmelCase_)
        model.to(UpperCAmelCase_)
        model.eval()
        lowercase__: Optional[int] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))

    def __lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) -> Optional[Any]:
        """Check that decoding with cached ``past_key_values`` matches a full
        forward pass on the concatenated sequence (on a random logit slice)."""
        lowercase__: int = True
        lowercase__: List[Any] = True
        lowercase__: Any = LlamaForCausalLM(config=UpperCAmelCase_)
        model.to(UpperCAmelCase_)
        model.eval()
        # first forward pass
        lowercase__: List[Any] = model(
            UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=UpperCAmelCase_ , use_cache=UpperCAmelCase_ , )
        lowercase__: Optional[Any] = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        lowercase__: Dict = ids_tensor((self.batch_size, 3) , config.vocab_size)
        lowercase__: List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2)
        # append to next input_ids and
        lowercase__: Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1)
        lowercase__: List[Any] = torch.cat([input_mask, next_mask] , dim=-1)
        lowercase__: List[str] = model(
            UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , )["hidden_states"][0]
        lowercase__: Optional[Any] = model(
            UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , )["hidden_states"][0]
        # select random slice
        lowercase__: Tuple = ids_tensor((1,) , output_from_past.shape[-1]).item()
        lowercase__: List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
        lowercase__: Any = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3))

    def __lowercase ( self) -> Tuple:
        """Repackage ``prepare_config_and_inputs`` output for the common tests.

        NOTE(review): the annotated tuple-unpacking below (``(...): int = ...``)
        is not valid Python — annotated assignments require a single name
        target; kept byte-identical.
        """
        lowercase__: str = self.prepare_config_and_inputs()
        (
            (
                lowercase__
            ) , (
                lowercase__
            ) , (
                lowercase__
            ) , (
                lowercase__
            ) , (
                lowercase__
            ) , (
                lowercase__
            ) , (
                lowercase__
            ) ,
        ): int = config_and_inputs
        lowercase__: Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _a ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
    """Llama unit-test suite: wires the tiny-model tester into the common
    model / generation / pipeline mixins and adds sequence-classification and
    RoPE-scaling checks.

    NOTE(review): the base-class placeholders ``lowercase_``, the helper name
    ``LlamaModelTester`` and several in-method ``UpperCAmelCase_`` references
    are undefined names left by obfuscation, and all methods share the name
    ``__lowercase`` (later defs shadow earlier ones). Code kept byte-identical;
    documentation only.
    """

    # NOTE(review): the same class attribute is re-assigned five times below,
    # so only the last binding survives — presumably these were distinct
    # attributes (all_model_classes, all_generative_model_classes, ...).
    UpperCamelCase__ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    UpperCamelCase__ = (LlamaForCausalLM,) if is_torch_available() else ()
    UpperCamelCase__ = (
        {
            """feature-extraction""": LlamaModel,
            """text-classification""": LlamaForSequenceClassification,
            """text-generation""": LlamaForCausalLM,
            """zero-shot""": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    UpperCamelCase__ = False
    UpperCamelCase__ = False

    def __lowercase ( self) -> List[Any]:
        """Set up the shared model tester and config tester."""
        lowercase__: int = LlamaModelTester(self)
        lowercase__: str = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37)

    def __lowercase ( self) -> Optional[Any]:
        """Run the standard config sanity checks."""
        self.config_tester.run_common_tests()

    def __lowercase ( self) -> List[Any]:
        """Smoke-test the base model forward pass."""
        lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase_)

    def __lowercase ( self) -> Any:
        """Run the model check for each position-embedding type."""
        lowercase__: List[str] = self.model_tester.prepare_config_and_inputs()
        # NOTE(review): ``type`` shadows the builtin, and the assignment target
        # does not feed into the call below — likely broken by obfuscation.
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowercase__: Union[str, Any] = type
            self.model_tester.create_and_check_model(*UpperCAmelCase_)

    def __lowercase ( self) -> Optional[Any]:
        """Sequence classification (regression-style labels): check logit shape."""
        lowercase__ , lowercase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__: List[Any] = 3
        lowercase__: Optional[int] = input_dict["input_ids"]
        lowercase__: List[str] = input_ids.ne(1).to(UpperCAmelCase_)
        lowercase__: List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
        lowercase__: str = LlamaForSequenceClassification(UpperCAmelCase_)
        model.to(UpperCAmelCase_)
        model.eval()
        lowercase__: Union[str, Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))

    def __lowercase ( self) -> Union[str, Any]:
        """Sequence classification with single-label problem type."""
        lowercase__ , lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__: Tuple = 3
        lowercase__: List[Any] = "single_label_classification"
        lowercase__: Dict = input_dict["input_ids"]
        lowercase__: Dict = input_ids.ne(1).to(UpperCAmelCase_)
        lowercase__: Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
        lowercase__: List[Any] = LlamaForSequenceClassification(UpperCAmelCase_)
        model.to(UpperCAmelCase_)
        model.eval()
        lowercase__: Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))

    def __lowercase ( self) -> Union[str, Any]:
        """Sequence classification with multi-label problem type (float labels)."""
        lowercase__ , lowercase__: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__: Union[str, Any] = 3
        lowercase__: List[str] = "multi_label_classification"
        lowercase__: Dict = input_dict["input_ids"]
        lowercase__: str = input_ids.ne(1).to(UpperCAmelCase_)
        lowercase__: Optional[Any] = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
        lowercase__: int = LlamaForSequenceClassification(UpperCAmelCase_)
        model.to(UpperCAmelCase_)
        model.eval()
        lowercase__: Union[str, Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def __lowercase ( self) -> Any:
        """Skipped upstream: complex-valued buffers break the common check."""
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def __lowercase ( self , UpperCAmelCase_) -> List[Any]:
        """Compare RoPE-scaled vs. unscaled models on short and long inputs:
        dynamic scaling must match on short inputs, differ on long ones."""
        lowercase__ , lowercase__: int = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__: Optional[Any] = ids_tensor([1, 10] , config.vocab_size)
        lowercase__: List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
        set_seed(42) # Fixed seed at init time so the two models get the same random weights
        lowercase__: Optional[Any] = LlamaModel(UpperCAmelCase_)
        original_model.to(UpperCAmelCase_)
        original_model.eval()
        lowercase__: int = original_model(UpperCAmelCase_).last_hidden_state
        lowercase__: int = original_model(UpperCAmelCase_).last_hidden_state
        set_seed(42) # Fixed seed at init time so the two models get the same random weights
        lowercase__: str = {"type": scaling_type, "factor": 10.0}
        lowercase__: str = LlamaModel(UpperCAmelCase_)
        scaled_model.to(UpperCAmelCase_)
        scaled_model.eval()
        lowercase__: Optional[Any] = scaled_model(UpperCAmelCase_).last_hidden_state
        lowercase__: Optional[Any] = scaled_model(UpperCAmelCase_).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-5))
        else:
            self.assertFalse(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-5))
@require_torch
class _a ( unittest.TestCase ):
    """Slow integration tests comparing Llama-2 checkpoint logits and greedy
    generation against hard-coded reference values.

    NOTE(review): all methods are skipped upstream (logit instabilities /
    gated model) and share the obfuscated name ``__lowercase``; code kept
    byte-identical, documentation only.
    """

    @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
    @slow
    def __lowercase ( self) -> str:
        """Llama-2 7B: compare mean logits and a 30-value logit slice."""
        lowercase__: Optional[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        lowercase__: Optional[int] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto")
        lowercase__: str = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        lowercase__: int = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]])
        torch.testing.assert_close(out.mean(-1) , UpperCAmelCase_ , atol=1E-2 , rtol=1E-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        lowercase__: Dict = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase_ , atol=1E-5 , rtol=1E-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
    @slow
    def __lowercase ( self) -> Any:
        """Llama-2 13B: compare mean logits and a 30-value logit slice."""
        lowercase__: List[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        lowercase__: List[Any] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto")
        lowercase__: Optional[Any] = model(torch.tensor(UpperCAmelCase_))
        # Expected mean on dim = -1
        lowercase__: Union[str, Any] = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]])
        torch.testing.assert_close(out.mean(-1) , UpperCAmelCase_ , atol=1E-2 , rtol=1E-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        lowercase__: Dict = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase_ , atol=1E-5 , rtol=1E-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
    @slow
    def __lowercase ( self) -> Tuple:
        """Llama-2 13B chat: compare mean logits against reference values.

        NOTE(review): unlike the siblings, the second assert re-checks the
        mean instead of the sliced logits — possibly a copy-paste slip in the
        original test; kept byte-identical.
        """
        lowercase__: Optional[int] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        lowercase__: Optional[int] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto")
        lowercase__: str = model(torch.tensor(UpperCAmelCase_))
        # Expected mean on dim = -1
        lowercase__: List[Any] = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]])
        torch.testing.assert_close(out.mean(-1) , UpperCAmelCase_ , atol=1E-2 , rtol=1E-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        lowercase__: Dict = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13])
        # fmt: on
        torch.testing.assert_close(out.mean(-1) , UpperCAmelCase_ , atol=1E-2 , rtol=1E-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test")
    @slow
    def __lowercase ( self) -> Union[str, Any]:
        """Llama-2 70B: compare mean logits and a 30-value logit slice."""
        lowercase__: Dict = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        lowercase__: List[str] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto")
        lowercase__: Optional[Any] = model(torch.tensor(UpperCAmelCase_))
        lowercase__: Any = torch.tensor(
            [[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] , dtype=torch.floataa)
        torch.testing.assert_close(out.mean(-1) , UpperCAmelCase_ , atol=1E-2 , rtol=1E-2)
        # fmt: off
        lowercase__: List[str] = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase_ , atol=1E-5 , rtol=1E-5)

    @unittest.skip("Model is curently gated")
    @slow
    def __lowercase ( self) -> Tuple:
        """Llama-2 13B chat: greedy generation must reproduce the reference text."""
        lowercase__: List[Any] = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
        lowercase__: List[str] = "Simply put, the theory of relativity states that "
        lowercase__: Dict = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        lowercase__: Tuple = tokenizer.encode(UpperCAmelCase_ , return_tensors="pt")
        lowercase__: Tuple = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=UpperCAmelCase_)
        # greedy generation outputs
        lowercase__: List[str] = model.generate(UpperCAmelCase_ , max_new_tokens=64 , top_p=UpperCAmelCase_ , temperature=1 , do_sample=UpperCAmelCase_)
        lowercase__: Any = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCAmelCase_)
        self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
| 120 | 1 |
"""Lazy import structure for the MaskFormer model family.

The module body only records *where* each public name lives; the actual
submodules are imported on first attribute access via ``_LazyModule``.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# BUG FIX: the import map and the optional additions below were previously
# bound to a throwaway variable, so ``_import_structure`` (used at the bottom)
# was never defined and importing this module raised NameError.
_import_structure = {
    'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
    'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}

# Vision-only components are registered only when the vision extras are present.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_maskformer'] = ['MaskFormerFeatureExtractor']
    _import_structure['image_processing_maskformer'] = ['MaskFormerImageProcessor']

# Modeling code requires torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_maskformer'] = [
        'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MaskFormerForInstanceSegmentation',
        'MaskFormerModel',
        'MaskFormerPreTrainedModel',
    ]
    _import_structure['modeling_maskformer_swin'] = [
        'MaskFormerSwinBackbone',
        'MaskFormerSwinModel',
        'MaskFormerSwinPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; never executed at runtime.
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )
else:
    import sys

    # BUG FIX: the lazy proxy must replace this module in ``sys.modules``; it
    # was previously assigned to an unused variable, leaving the module empty.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 107 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger for the conversion script.
a__ : Optional[int] =logging.get_logger(__name__)

# Fixed seed so the old/new model comparison below is deterministic.
set_seed(770)


# Mapping from original suno/bark checkpoint layer names to the names used by
# the HF Bark implementation (applied via str.replace during state-dict fixup).
a__ : str ={
    '''c_attn''': '''att_proj''',
    '''c_proj''': '''out_proj''',
    '''c_fc''': '''in_proj''',
    '''transformer.''': '''''',
    '''h.''': '''layers.''',
    '''ln_1''': '''layernorm_1''',
    '''ln_2''': '''layernorm_2''',
    '''ln_f''': '''layernorm_final''',
    '''wpe''': '''position_embeds_layer''',
    '''wte''': '''input_embeds_layer''',
}

# Hub locations of the Bark checkpoints, keyed by sub-model type; ``*_small``
# keys point at the reduced checkpoints.
a__ : str ={
    '''text_small''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''text.pt''',
    },
    '''coarse_small''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''coarse.pt''',
    },
    '''fine_small''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''fine.pt''',
    },
    '''text''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''text_2.pt''',
    },
    '''coarse''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''coarse_2.pt''',
    },
    '''fine''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''fine_2.pt''',
    },
}

a__ : Dict =os.path.dirname(os.path.abspath(__file__))
a__ : str =os.path.join(os.path.expanduser('''~'''), '''.cache''')
# Local cache directory for downloaded Bark checkpoints.
# NOTE(review): ``default_cache_dir`` is undefined here — the value on the
# previous line was bound to ``a__`` by obfuscation; confirm the intended name.
a__ : str =os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def lowercase__ ( model_type , use_small=False ):
    """Return the local cache path of a Bark checkpoint.

    Parameters
    ----------
    model_type : str
        One of ``"text"``, ``"coarse"`` or ``"fine"``.
    use_small : bool
        Whether to point at the reduced (``*_small``) checkpoint.
    """
    # BUG FIX: the original had two parameters both named ``__lowercase``
    # (a SyntaxError), suffixed an undefined name ``key``, and joined the
    # file name onto the model type instead of the cache directory.
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR , REMOTE_MODEL_PATHS[key]['file_name'] )
def lowercase__ ( from_hf_path , file_name ):
    """Download ``file_name`` from the Hub repo ``from_hf_path`` into the local
    checkpoint cache directory.
    """
    # BUG FIX: the original repeated the parameter name ``__lowercase``
    # (a SyntaxError) and passed the parameters themselves as the makedirs /
    # local_dir targets; the cache directory is the intended destination.
    os.makedirs(CACHE_DIR , exist_ok=True )
    hf_hub_download(repo_id=from_hf_path , filename=file_name , local_dir=CACHE_DIR )
def lowercase__ ( __lowercase : int , __lowercase : Tuple , __lowercase : Optional[int]=False , __lowercase : Tuple="text" ) -> Optional[Any]:
    """Load a suno/bark checkpoint and convert it into the matching HF sub-model.

    NOTE(review): the signature repeats the parameter name ``__lowercase``
    (a SyntaxError in Python) — presumably the original parameters were
    ``(ckpt_path, device, use_small=False, model_type="text")`` given the
    body's use of ``model_type``/``device``/``use_small``; confirm against
    the call site below.
    """
    # Pick the HF model / config / generation-config classes for this sub-model.
    if model_type == "text":
        __UpperCamelCase = BarkSemanticModel
        __UpperCamelCase = BarkSemanticConfig
        __UpperCamelCase = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        __UpperCamelCase = BarkCoarseModel
        __UpperCamelCase = BarkCoarseConfig
        __UpperCamelCase = BarkCoarseGenerationConfig
    elif model_type == "fine":
        __UpperCamelCase = BarkFineModel
        __UpperCamelCase = BarkFineConfig
        __UpperCamelCase = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    __UpperCamelCase = F'''{model_type}_small''' if use_small else model_type
    __UpperCamelCase = REMOTE_MODEL_PATHS[model_key]
    # Download the checkpoint into the cache on first use.
    if not os.path.exists(__lowercase ):
        logger.info(F'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
        _download(model_info['repo_id'] , model_info['file_name'] )
    __UpperCamelCase = torch.load(__lowercase , map_location=__lowercase )
    # this is a hack
    __UpperCamelCase = checkpoint['model_args']
    # Older checkpoints store a single ``vocab_size``; split it into the
    # input/output vocab sizes the HF config expects.
    if "input_vocab_size" not in model_args:
        __UpperCamelCase = model_args['vocab_size']
        __UpperCamelCase = model_args['vocab_size']
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    __UpperCamelCase = model_args.pop('n_head' )
    __UpperCamelCase = model_args.pop('n_embd' )
    __UpperCamelCase = model_args.pop('n_layer' )
    __UpperCamelCase = ConfigClass(**checkpoint['model_args'] )
    __UpperCamelCase = ModelClass(config=__lowercase )
    __UpperCamelCase = GenerationConfigClass()
    __UpperCamelCase = model_generation_config
    __UpperCamelCase = checkpoint['model']
    # fixup checkpoint: strip the torch.compile prefix and rename layers to
    # the HF naming scheme via the module-level replacement table.
    __UpperCamelCase = '_orig_mod.'
    for k, v in list(state_dict.items() ):
        if k.startswith(__lowercase ):
            # replace part of the key with corresponding layer name in HF implementation
            __UpperCamelCase = k[len(__lowercase ) :]
            for old_layer_name in new_layer_name_dict:
                __UpperCamelCase = new_k.replace(__lowercase , new_layer_name_dict[old_layer_name] )
            __UpperCamelCase = state_dict.pop(__lowercase )
    # Any key mismatch (ignoring the non-persistent attention bias buffers)
    # means the conversion mapping is wrong — fail loudly.
    __UpperCamelCase = set(state_dict.keys() ) - set(model.state_dict().keys() )
    __UpperCamelCase = {k for k in extra_keys if not k.endswith('.attn.bias' )}
    __UpperCamelCase = set(model.state_dict().keys() ) - set(state_dict.keys() )
    __UpperCamelCase = {k for k in missing_keys if not k.endswith('.attn.bias' )}
    if len(__lowercase ) != 0:
        raise ValueError(F'''extra keys found: {extra_keys}''' )
    if len(__lowercase ) != 0:
        raise ValueError(F'''missing keys: {missing_keys}''' )
    model.load_state_dict(__lowercase , strict=__lowercase )
    __UpperCamelCase = model.num_parameters(exclude_embeddings=__lowercase )
    __UpperCamelCase = checkpoint['best_val_loss'].item()
    logger.info(F'''model loaded: {round(n_params/1e6 , 1 )}M params, {round(__lowercase , 3 )} loss''' )
    model.eval()
    model.to(__lowercase )
    del checkpoint, state_dict
    return model
def lowercase__ ( __lowercase : List[Any] , __lowercase : Any=False , __lowercase : List[Any]="text" ) -> int:
    """Convert one Bark sub-model, verify its output against the original
    implementation, and save it to ``pytorch_dump_folder_path``.

    NOTE(review): the ``__main__`` block below calls ``load_model(...)``,
    which is not this function's name — all functions in this file were
    obfuscated to ``lowercase__`` and shadow one another; confirm the intended
    names against the upstream conversion script.
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    # Conversion and the old/new comparison both run on CPU.
    __UpperCamelCase = 'cpu' # do conversion on cpu
    __UpperCamelCase = _get_ckpt_path(__lowercase , use_small=__lowercase )
    __UpperCamelCase = _load_model(__lowercase , __lowercase , model_type=__lowercase , use_small=__lowercase )
    # load bark initial model
    __UpperCamelCase = _bark_load_model(__lowercase , 'cpu' , model_type=__lowercase , use_small=__lowercase )
    if model_type == "text":
        __UpperCamelCase = bark_model['model']
    if model.num_parameters(exclude_embeddings=__lowercase ) != bark_model.get_num_params():
        raise ValueError('initial and new models don\'t have the same number of parameters' )
    # check if same output as the bark model
    __UpperCamelCase = 5
    __UpperCamelCase = 10
    if model_type in ["text", "coarse"]:
        # Text/coarse models take (batch, seq) token ids; compare final-step logits.
        __UpperCamelCase = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
        __UpperCamelCase = bark_model(__lowercase )[0]
        __UpperCamelCase = model(__lowercase )
        # take last logits
        __UpperCamelCase = output_new_model_total.logits[:, [-1], :]
    else:
        # Fine model additionally takes the codebook index and full code tensor.
        __UpperCamelCase = 3
        __UpperCamelCase = 8
        __UpperCamelCase = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
        __UpperCamelCase = model(__lowercase , __lowercase )
        __UpperCamelCase = bark_model(__lowercase , __lowercase )
        __UpperCamelCase = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError('initial and new outputs don\'t have the same shape' )
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError('initial and new outputs are not equal' )
    Path(__lowercase ).mkdir(exist_ok=__lowercase )
    model.save_pretrained(__lowercase )
def lowercase__ ( __lowercase : int , __lowercase : int , __lowercase : Union[str, Any] , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : List[Any] , ) -> List[Any]:
    """Assemble a full ``BarkModel`` from the three converted sub-models plus
    the EnCodec codec, and push the result to the Hub.

    NOTE(review): the six parameters all share the obfuscated name
    ``__lowercase`` (a SyntaxError) — presumably semantic/coarse/fine paths,
    an append dir, the output folder and the Hub repo id; confirm upstream.
    """
    __UpperCamelCase = os.path.join(__lowercase , __lowercase )
    # Load the per-sub-model configs produced by the conversion step above.
    __UpperCamelCase = BarkSemanticConfig.from_pretrained(os.path.join(__lowercase , 'config.json' ) )
    __UpperCamelCase = BarkCoarseConfig.from_pretrained(os.path.join(__lowercase , 'config.json' ) )
    __UpperCamelCase = BarkFineConfig.from_pretrained(os.path.join(__lowercase , 'config.json' ) )
    __UpperCamelCase = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
    __UpperCamelCase = BarkSemanticModel.from_pretrained(__lowercase )
    __UpperCamelCase = BarkCoarseModel.from_pretrained(__lowercase )
    __UpperCamelCase = BarkFineModel.from_pretrained(__lowercase )
    __UpperCamelCase = EncodecModel.from_pretrained('facebook/encodec_24khz' )
    # Combine the sub-configs / sub-generation-configs into the composite ones.
    __UpperCamelCase = BarkConfig.from_sub_model_configs(
        __lowercase , __lowercase , __lowercase , __lowercase )
    __UpperCamelCase = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
    __UpperCamelCase = BarkModel(__lowercase )
    __UpperCamelCase = semantic
    __UpperCamelCase = coarseAcoustic
    __UpperCamelCase = fineAcoustic
    __UpperCamelCase = codec
    __UpperCamelCase = bark_generation_config
    Path(__lowercase ).mkdir(exist_ok=__lowercase )
    bark.save_pretrained(__lowercase , repo_id=__lowercase , push_to_hub=__lowercase )
if __name__ == "__main__":
    # Parse CLI arguments and run the single-sub-model conversion.
    # NOTE(review): obfuscation broke this block — the parser is bound to
    # ``a__`` but then referenced as ``parser``, the parsed namespace is
    # referenced as ``args``, and ``load_model`` does not exist under that
    # name (the functions above are all called ``lowercase__``).
    a__ : Any =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
    a__ : Optional[int] =parser.parse_args()
    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 399 | 0 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class a ( nn.Module ):
    """Tiny 3->4->5 MLP with a BatchNorm in the middle, used as the fixture
    for the accelerate hook tests below."""

    def __init__( self ):
        super().__init__()
        # BUG FIX: both linear layers were previously stored under the same
        # attribute name (so the second overwrote the first), and the
        # batch-norm class name was misspelled (``nn.BatchNormad``).
        self.lineara = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linearb = nn.Linear(4 , 5 )

    def forward( self , x ):
        """Run a (batch, 3) input through linear -> batchnorm -> linear.

        BUG FIX: the method must be named ``forward`` for ``nn.Module``
        call dispatch — the tests below invoke the model as ``test_model(x)``.
        """
        return self.linearb(self.batchnorm(self.lineara(x ) ) )
class a ( ModelHook ):
    """Hook that adds 1 to the first positional argument before each forward.

    BUG FIX: the base class was an undefined obfuscation placeholder
    (``_SCREAMING_SNAKE_CASE``) — the imported ``ModelHook`` is the intended
    base — and the method must be named ``pre_forward`` for accelerate's
    ``add_hook_to_module`` dispatch to invoke it.
    """

    def pre_forward( self , module , *args , **kwargs ):
        # Shift only the first positional argument; pass everything else through.
        return (args[0] + 1,) + args[1:], kwargs
class a ( ModelHook ):
    """Hook that adds 1 to the module output after each forward.

    BUG FIX: the base class was an undefined obfuscation placeholder
    (``_SCREAMING_SNAKE_CASE``) — the imported ``ModelHook`` is the intended
    base — and the method must be named ``post_forward`` for accelerate's
    hook dispatch to invoke it.
    """

    def post_forward( self , module , output ):
        return output + 1
class a ( unittest.TestCase ):
    """Tests attaching and removing accelerate hooks on the small test model.

    NOTE(review): obfuscation left undefined names throughout these methods —
    ``__magic_name__`` never exists in method scope, ``_a`` is rebound on
    every line, and the fixture classes above are all named ``a`` rather than
    ``ModelForTest``/``ModelHook``. Code kept byte-identical; docs only.
    """

    def __UpperCAmelCase ( self ) -> List[str]:
        """A plain ``ModelHook`` can be attached and cleanly removed, leaving
        the wrapped ``forward`` name and signature untouched."""
        _a = ModelForTest()
        _a = ModelHook()
        add_hook_to_module(__magic_name__ , __magic_name__ )
        self.assertEqual(test_model._hf_hook , __magic_name__ )
        self.assertTrue(hasattr(__magic_name__ , '_old_forward' ) )
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__ , 'forward' )
        self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
        remove_hook_from_module(__magic_name__ )
        self.assertFalse(hasattr(__magic_name__ , '_hf_hook' ) )
        self.assertFalse(hasattr(__magic_name__ , '_old_forward' ) )
def __UpperCAmelCase ( self ) -> int:
_a = ModelForTest()
_a = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
add_hook_to_module(__magic_name__ , __magic_name__ , append=__magic_name__ )
self.assertEqual(isinstance(test_model._hf_hook , __magic_name__ ) , __magic_name__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__magic_name__ , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , '_hf_hook' ) )
self.assertFalse(hasattr(__magic_name__ , '_old_forward' ) )
def __UpperCAmelCase ( self ) -> List[Any]:
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(x + 1 )
_a = test_model(x + 2 )
_a = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
_a = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
_a = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
_a = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1e-5 )
def __UpperCAmelCase ( self ) -> str:
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(__magic_name__ )
_a = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
_a = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
_a = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
_a = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , output + 2 , atol=1e-5 )
def __UpperCAmelCase ( self ) -> Optional[int]:
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(__magic_name__ )
_a = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
_a = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_a = True
_a = test_model(__magic_name__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_a = torch.randn(2 , 3 )
_a = model(__magic_name__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__magic_name__ , AlignDevicesHook(io_same_device=__magic_name__ ) )
_a = torch.randn(2 , 3 ).to(0 )
_a = model(__magic_name__ )
self.assertEqual(output.device , torch.device(0 ) )
def __UpperCAmelCase ( self ) -> int:
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
_a = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
_a = torch.device(hook_kwargs['execution_device'] )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
_a = torch.randn(2 , 3 )
_a = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
_a = {
'execution_device': 0 if torch.cuda.is_available() else 'cpu',
'offload': True,
'offload_buffers': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
_a = torch.randn(2 , 3 )
_a = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def __UpperCAmelCase ( self ) -> str:
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
_a = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
_a = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
_a = torch.randn(2 , 3 )
_a = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , offload_buffers=__magic_name__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
_a = torch.randn(2 , 3 )
_a = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def __UpperCAmelCase ( self ) -> str:
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
_a = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
_a = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
_a = torch.randn(2 , 3 )
_a = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() , offload_buffers=__magic_name__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
_a = torch.randn(2 , 3 )
_a = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
| 706 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def _A (tf_checkpoint_path: str , bert_config_file: str , pytorch_dump_path: str ) -> None:
    """Convert a TensorFlow BERT checkpoint into a PyTorch state dict.

    Bug fix: the original declared all three parameters with the same mangled
    name (a SyntaxError) and the body read undefined ``config``/``model``;
    names restored from the body and the argparse help text below.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        bert_config_file: JSON config describing the model architecture.
        pytorch_dump_path: where to ``torch.save`` the converted state dict.
    """
    config = BertConfig.from_json_file(tf_checkpoint_path if False else bert_config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    model = BertForPreTraining(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    # Bug fix: the parser and parsed namespace were assigned to a throwaway
    # ``a_`` while the code below read undefined ``parser``/``args``, and the
    # entry point referenced a function name that does not exist in this file
    # (the converter above is the mangled ``_A``).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    _A(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 532 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __lowerCAmelCase ( _UpperCamelCase ):
    """Config tester: MobileNetV1 config must expose its model-specific attrs."""

    def snake_case__( self: Optional[int] ):
        # Bug fix: the original built the config into one mangled local but
        # then probed a different, undefined name — use a single variable.
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config, 'tf_padding' ) )
        self.parent.assertTrue(hasattr(config, 'depth_multiplier' ) )
class __lowerCAmelCase :
    """Builds MobileNetV1 configs and dummy inputs for the model tests below.

    NOTE(review): the original ``__init__`` reused one mangled identifier for
    every parameter (a SyntaxError) and bound values to locals instead of
    ``self``; parameter names/defaults were recovered positionally from the
    attribute reads in the other methods — confirm against the upstream file.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        # Effective hidden size shrinks with the depth multiplier.
        self.last_hidden_size = int(last_hidden_size * depth_multiplier )
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs( self ):
        """Return (config, pixel_values, labels, pixel_labels) for one batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config( self ):
        """Build a MobileNetV1 config from the tester's hyper-parameters."""
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model( self, config, pixel_values, labels, pixel_labels ):
        """Forward the base model and check the last hidden state shape."""
        model = MobileNetVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification( self, config, pixel_values, labels, pixel_labels ):
        """Forward the classification head and check the logits shape."""
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values, labels=labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        """Adapter for the common test mixin: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
    """Model-level tests for MobileNetV1.

    NOTE(review): in the original every method shared one mangled name (later
    defs silently shadowed earlier ones, so unittest discovered none of them)
    and every class attribute was named ``_A`` (only the last survived).
    Method and attribute names below were reconstructed from the upstream
    transformers test file — confirm before merging.
    """

    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    # Four disabled common-test switches (original names lost; these follow
    # the upstream file's ordering).
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp( self ):
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False )

    def test_config( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileNetV1 does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        pass

    @unittest.skip(reason='MobileNetV1 does not support input and output embeddings' )
    def test_model_common_attributes( self ):
        pass

    @unittest.skip(reason='MobileNetV1 does not output attentions' )
    def test_attention_outputs( self ):
        pass

    def test_forward_signature( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names )

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict, config, model_class ):
            # Bug fix: the nested helper originally declared all three
            # parameters with one mangled name (a SyntaxError).
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 26
            self.assertEqual(len(hidden_states ), expected_num_stages )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class )

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
    """Load the COCO cats fixture image used by the integration test below."""
    fixture_path = './tests/fixtures/tests_samples/COCO/000000039769.png'
    return Image.open(fixture_path )
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow integration test: MobileNetV1 classification head on a fixture image."""

    @cached_property
    def default_image_processor( self ):
        # Bug fix: this property and the test below originally shared one
        # mangled name, so ``self.default_image_processor`` (read in the test)
        # never existed; name restored from that usage.
        return (
            MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224' ) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head( self ):
        model = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt' ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size((1, 1001) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4 ) )
| 266 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
    """Pipeline tests for audio classification.

    NOTE(review): the original's two mapping attributes were both named ``_A``
    (the first was clobbered), the expected-output lists were bound to mangled
    locals while the assertions read ``EXPECTED_OUTPUT``/``EXPECTED_OUTPUT_PT_2``
    (NameError), and ``np.floataa`` does not exist. Names reconstructed from
    the upstream transformers test file — confirm before merging.
    """

    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline( self, model, tokenizer, processor ):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor )
        # test with a raw waveform
        audio_a = np.zeros((34000,) )
        audio = np.zeros((14000,) )
        return audio_classifier, [audio_a, audio]

    def run_pipeline_test( self, audio_classifier, examples ):
        audio_a, audio = examples
        output = audio_classifier(audio )
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {'score': ANY(float ), 'label': ANY(str )},
                {'score': ANY(float ), 'label': ANY(str )},
            ],
        )
        output = audio_classifier(audio, top_k=1 )
        self.assertEqual(
            output,
            [
                {'score': ANY(float ), 'label': ANY(str )},
            ],
        )

        self.run_torchaudio(audio_classifier )

    @require_torchaudio
    def run_torchaudio( self, audio_classifier ):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation' )
        audio = dataset[0]['audio']['array']
        output = audio_classifier(audio )
        self.assertEqual(
            output,
            [
                {'score': ANY(float ), 'label': ANY(str )},
                {'score': ANY(float ), 'label': ANY(str )},
            ],
        )

    @require_torch
    def test_small_model_pt( self ):
        model = 'anton-l/wav2vec2-random-tiny-classifier'
        audio_classifier = pipeline('audio-classification', model=model )

        audio = np.ones((8000,) )
        output = audio_classifier(audio, top_k=4 )

        EXPECTED_OUTPUT = [
            {'score': 0.0842, 'label': 'no'},
            {'score': 0.0838, 'label': 'up'},
            {'score': 0.0837, 'label': 'go'},
            {'score': 0.0834, 'label': 'right'},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {'score': 0.0845, 'label': 'stop'},
            {'score': 0.0844, 'label': 'on'},
            {'score': 0.0841, 'label': 'right'},
            {'score': 0.0834, 'label': 'left'},
        ]
        self.assertIn(nested_simplify(output, decimals=4 ), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )

        audio_dict = {'array': np.ones((8000,) ), 'sampling_rate': audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4 )
        self.assertIn(nested_simplify(output, decimals=4 ), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )

    @require_torch
    @slow
    def test_large_model_pt( self ):
        import datasets

        model = 'superb/wav2vec2-base-superb-ks'
        audio_classifier = pipeline('audio-classification', model=model )
        dataset = datasets.load_dataset('anton-l/superb_dummy', 'ks', split='test' )

        audio = np.array(dataset[3]['speech'], dtype=np.float32 )
        output = audio_classifier(audio, top_k=4 )
        self.assertEqual(
            nested_simplify(output, decimals=3 ),
            [
                {'score': 0.981, 'label': 'go'},
                {'score': 0.007, 'label': 'up'},
                {'score': 0.006, 'label': '_unknown_'},
                {'score': 0.001, 'label': 'down'},
            ],
        )

    @require_tf
    @unittest.skip('Audio classification is not implemented for TF' )
    def test_small_model_tf( self ):
        pass
| 266 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Bug fix: both module-level constants were named ``_a``, so the archive map
# silently clobbered the logger; restore the conventional names.
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""",
    """facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __UpperCamelCase ( lowercase ):
    """Configuration for XLM-RoBERTa-XL.

    NOTE(review): the original ``__init__`` declared every parameter with one
    mangled name (a SyntaxError); names were restored positionally from the
    attribute assignments in the body and the canonical upstream defaults.
    """

    # Model identifier consumed by AutoConfig.
    SCREAMING_SNAKE_CASE__ = 'xlm-roberta-xl'

    def __init__(
        self,
        vocab_size=250_880,
        hidden_size=2_560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10_240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __UpperCamelCase ( lowercase ):
    @property
    def __A ( self ):
        """Return the ONNX input-name -> dynamic-axes mapping for the current task."""
        # Multiple-choice inputs carry an extra "choice" dimension between the
        # batch and sequence axes; every other task uses plain (batch, sequence).
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict((name, dynamic_axis) for name in ("input_ids", "attention_mask"))
from collections.abc import Sequence
def __lowerCAmelCase ( A , A = False ):
if not arr:
return 0
UpperCAmelCase_ = 0 if allow_empty_subarrays else float("-inf" )
UpperCAmelCase_ = 0.0
for num in arr:
UpperCAmelCase_ = max(0 if allow_empty_subarrays else num , curr_sum + num )
UpperCAmelCase_ = max(A , A )
return max_sum
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Bug fix: the demo printed via names that do not exist in this file
    # (``max_subarray_sum``/``nums``); call the (mangled) function above.
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(F'{__lowerCAmelCase(nums) = }')
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class _snake_case ( unittest.TestCase ):
    """Slow integration checks for XLM-RoBERTa base/large hidden states.

    NOTE(review): in the original both test methods shared one mangled name
    (the second shadowed the first), every local was ``_SCREAMING_SNAKE_CASE``
    (later assignments clobbered earlier ones) and the forward pass referenced
    an undefined ``_A``; identifiers restored from the inline comments.
    """

    @slow
    def test_xlm_roberta_base( self ):
        model = XLMRobertaModel.from_pretrained("""xlm-roberta-base""")
        input_ids = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 1_2, 7_6_8))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["""last_hidden_state"""].detach()
        self.assertEqual(output.shape , expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3))

    @slow
    def test_xlm_roberta_large( self ):
        model = XLMRobertaModel.from_pretrained("""xlm-roberta-large""")
        input_ids = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 1_2, 1_0_2_4))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["""last_hidden_state"""].detach()
        self.assertEqual(output.shape , expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3))
| 338 | """simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
lowerCAmelCase_ = pd.read_csv(
'''https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'''
'''position_salaries.csv'''
)
lowerCAmelCase_ = dataset.iloc[:, 1:2].values
lowerCAmelCase_ = dataset.iloc[:, 2].values
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0)
lowerCAmelCase_ = PolynomialFeatures(degree=4)
lowerCAmelCase_ = poly_reg.fit_transform(X)
lowerCAmelCase_ = LinearRegression()
pol_reg.fit(X_poly, y)
def lowerCamelCase_()-> str:
plt.scatter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , color="""red""" )
plt.plot(__SCREAMING_SNAKE_CASE , pol_reg.predict(poly_reg.fit_transform(__SCREAMING_SNAKE_CASE ) ) , color="""blue""" )
plt.title("""Truth or Bluff (Linear Regression)""" )
plt.xlabel("""Position level""" )
plt.ylabel("""Salary""" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 338 | 1 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    """ConfigTester variant that checks Levit-specific config attributes.

    Levit has no ``hidden_size``/``num_hidden_layers``, so the common property
    check is overridden to look at its own fields instead.
    """

    # NOTE(review): the obfuscated base class `__UpperCamelCase` was undefined;
    # `ConfigTester` (imported above) is the intended base.
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    """Builds small Levit configs/inputs and checks model output shapes.

    NOTE(review): the obfuscated original used one duplicated parameter name
    for the whole ``__init__`` signature (a SyntaxError) and destroyed every
    tuple unpacking; identifiers are restored from the assignments that
    survived. The class was also referenced below as ``LevitModelTester``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Two down-sampling stages between the three hidden sizes.
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a LevitConfig mirroring this tester's hyper-parameters."""
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check last_hidden_state shape after the conv stem + downsampling."""
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Apply the conv-stem output-size formula four times (four conv layers).
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check the classifier head produces (batch_size, num_labels) logits."""
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common-test mixin: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __a ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
    """Model-level test suite for Levit: config checks, forward-signature and
    hidden-state shape checks, training/gradient-checkpointing smoke tests,
    problem-type handling, and slow pretrained-loading.

    NOTE(review): this block is machine-obfuscated and will not run as-is:
    - the two `__UpperCamelCase` bases are undefined (presumably
      ModelTesterMixin / PipelineTesterMixin, both imported above) — confirm;
    - every class attribute shares the single name `__lowercase`, so each
      assignment overwrites the previous one (they were presumably
      all_model_classes, pipeline_model_mapping and boolean test flags);
    - every method shares the name `SCREAMING_SNAKE_CASE__`, so later defs
      shadow earlier ones and only the last is reachable;
    - locals are all bound to `lowercase__` while later lines read the
      original names (`model`, `inputs_dict`, `height`, ...), and the
      undefined name `lowerCAmelCase__` is used as a value throughout.
    The original identifiers must be restored before this suite can run.
    """

    __lowercase : List[str] = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )

    __lowercase : List[str] = (
        {
            'feature-extraction': LevitModel,
            'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    __lowercase : Optional[int] = False
    __lowercase : Optional[int] = False
    __lowercase : int = False
    __lowercase : Dict = False
    __lowercase : Tuple = False

    def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
        '''simple docstring'''
        # Build the shared model tester and config tester used by the suite.
        lowercase__: Tuple = LevitModelTester(self )
        lowercase__: int = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )

    def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
        '''simple docstring'''
        # Run the standard battery of config (de)serialization checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
        '''simple docstring'''
        return

    @unittest.skip(reason='Levit does not use inputs_embeds' )
    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
        '''simple docstring'''
        pass

    @unittest.skip(reason='Levit does not support input and output embeddings' )
    def SCREAMING_SNAKE_CASE__ ( self ) -> int:
        '''simple docstring'''
        pass

    @unittest.skip(reason='Levit does not output attentions' )
    def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
        '''simple docstring'''
        pass

    def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
        '''simple docstring'''
        # Checks every model's forward signature starts with `pixel_values`.
        lowercase__: int = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowercase__: List[str] = model_class(lowerCAmelCase__ )
            lowercase__: Union[str, Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase__: List[Any] = [*signature.parameters.keys()]
            lowercase__: int = ['pixel_values']
            self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
        '''simple docstring'''
        # Verifies hidden-state count and the shape of the first stage output.
        def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
            lowercase__: Union[str, Any] = model_class(lowerCAmelCase__ )
            model.to(lowerCAmelCase__ )
            model.eval()

            with torch.no_grad():
                lowercase__: int = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )

            lowercase__: Dict = outputs.hidden_states

            lowercase__: Optional[int] = len(self.model_tester.depths ) + 1
            self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )

            # Replay the conv-stem output-size formula (four conv layers).
            lowercase__: List[str] = (self.model_tester.image_size, self.model_tester.image_size)
            lowercase__: str = image_size[0], image_size[1]
            for _ in range(4 ):
                lowercase__: Optional[int] = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
                lowercase__: Union[str, Any] = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ] , )

        lowercase__: Dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowercase__: Optional[int] = True
            check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase__: Any = True

            check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
        '''simple docstring'''
        pass

    # NOTE(review): duplicate parameter names below are a SyntaxError
    # introduced by the obfuscation (originally model_class, inputs_dict,
    # return_labels=False).
    def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> Dict:
        '''simple docstring'''
        lowercase__: str = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                # The teacher variant is inference-only and takes no labels.
                del inputs_dict["labels"]

        return inputs_dict

    def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
        '''simple docstring'''
        lowercase__: Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
        '''simple docstring'''
        lowercase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
        '''simple docstring'''
        # Smoke-tests a full training step (forward + backward) per class.
        if not self.model_tester.is_training:
            return

        lowercase__: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__: Optional[int] = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(lowerCAmelCase__ )
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            lowercase__: Union[str, Any] = model_class(lowerCAmelCase__ )
            model.to(lowerCAmelCase__ )
            model.train()
            lowercase__: int = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
            lowercase__: Optional[Any] = model(**lowerCAmelCase__ ).loss
            loss.backward()

    def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
        '''simple docstring'''
        # Same training smoke test, but with gradient checkpointing enabled.
        lowercase__: Dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        lowercase__: Any = False
        lowercase__: int = True

        for model_class in self.all_model_classes:
            if model_class in get_values(lowerCAmelCase__ ) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            lowercase__: Optional[Any] = model_class(lowerCAmelCase__ )
            model.gradient_checkpointing_enable()
            model.to(lowerCAmelCase__ )
            model.train()
            lowercase__: List[str] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
            lowercase__: Tuple = model(**lowerCAmelCase__ ).loss
            loss.backward()

    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
        '''simple docstring'''
        # Exercises the three classification problem types end to end.
        lowercase__: Any = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__: Optional[int] = [
            {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
            {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
            {'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(lowerCAmelCase__ ),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ):
                    lowercase__: Any = problem_type['title']
                    lowercase__: List[str] = problem_type['num_labels']
                    lowercase__: Tuple = model_class(lowerCAmelCase__ )
                    model.to(lowerCAmelCase__ )
                    model.train()
                    lowercase__: Optional[Any] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )

                    if problem_type["num_labels"] > 1:
                        lowercase__: int = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )

                    lowercase__: str = inputs['labels'].to(problem_type['dtype'] )

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=lowerCAmelCase__ ) as warning_list:
                        lowercase__: Tuple = model(**lowerCAmelCase__ ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                F'Something is going wrong in the regression problem: intercepted {w.message}' )

                    loss.backward()

    @slow
    def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
        '''simple docstring'''
        # Slow test: only the first checkpoint is loaded to bound runtime.
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__: Any = LevitModel.from_pretrained(lowerCAmelCase__ )
            self.assertIsNotNone(lowerCAmelCase__ )
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test.

    Returns a PIL ``Image``; the original obfuscated ``-> int`` annotation was
    wrong, and the function was invoked below as ``prepare_img``.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check of a pretrained Levit classifier on one image."""

    @cached_property
    def default_image_processor(self):
        # Processor matching the checkpoint used in the test below; the name
        # must be `default_image_processor` because the test reads it.
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        """Run one forward pass and compare logits against reference values."""
        model = LevitForImageClassificationWithTeacher.from_pretrained(
            LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]
        ).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 715 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

# NOTE(review): the obfuscated original bound all four constants to the same
# name, leaving VOCAB_FILES_NAMES etc. undefined for the tokenizer class
# below; the conventional transformers constant names are restored.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# Checkpoint-name -> hub URL for each vocabulary artifact.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
    },
    "merges_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
    },
}

# Maximum sequence length supported by each checkpoint's position embeddings.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 10_24,
    "gpt2-medium": 10_24,
    "gpt2-large": 10_24,
    "gpt2-xl": 10_24,
    "distilgpt2": 10_24,
}
class GPTaTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) GPT-2 BPE tokenizer.

    NOTE(review): reconstructed from an obfuscated block in which the base
    class was undefined, all class attributes shared one name, and every
    method shared one name; identifiers restored per the transformers
    GPT2TokenizerFast implementation.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPTaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        # Keep the backend pre-tokenizer's add_prefix_space in sync with the
        # value requested here (the serialized tokenizer.json may disagree).
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        # Pre-tokenized input needs a leading space for BPE to match training.
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Write the vocab/merges files to `save_directory`; return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation) -> List[int]:
        """Concatenate conversation turns, each terminated by EOS, truncated
        from the left to the model's maximum length."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 335 | 0 |
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def gen_gaussian_kernel(k_size, sigma):
    """Return a k_size x k_size 2-D Gaussian kernel for the given sigma.

    The grid is centered on the middle pixel, so the kernel is symmetric.
    The function was invoked below as ``gen_gaussian_kernel`` while its
    obfuscated definition had a duplicated parameter name (a SyntaxError)
    and read undefined locals; both are fixed here.
    """
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size, sigma):
    """Convolve a 2-D grayscale image with a Gaussian kernel (valid mode).

    Uses im2col: every k_size*k_size window becomes one row of a matrix, the
    kernel is flattened, and a single matrix product produces the output.
    Restored from an obfuscated definition (duplicate parameter names,
    destroyed `height, width` unpacking) and renamed to match its call site.
    """
    height, width = image.shape[0], image.shape[1]
    # dst image height and width (valid convolution shrinks by k_size - 1)
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col: turn the k_size*k_size pixels of each window into one row
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # flatten the kernel into shape (k*k,)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # one matrix product, then reshape back to the destination image.
    # "uint8" dtype string replaces the broken `uinta` import alias.
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype("uint8")
    return dst
if __name__ == "__main__":
    # NOTE(review): this demo block is obfuscated — every value is bound to
    # `_lowercase`, so the later reads of `img`, `gray`, `gaussianaxa` and the
    # call to `gaussian_filter` are undefined names; restore the original
    # identifiers before running. Requires OpenCV (`cva` import above).
    # read original image
    _lowercase : Optional[Any] = imread(R"../image_data/lena.jpg")
    # turn image in gray scale value
    _lowercase : int = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    _lowercase : Optional[int] = gaussian_filter(gray, 3, sigma=1)
    _lowercase : List[Any] = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussianaxa)
    imshow("gaussian filter with 5x5 mask", gaussianaxa)
    waitKey()
| 641 |
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase(metaclass=DummyObject):
    """Placeholder object that raises a helpful error when `note_seq` is missing.

    Any instantiation or classmethod call defers to `requires_backends`,
    which raises an ImportError explaining that the `note_seq` backend must
    be installed. The metaclass was the undefined `_UpperCamelCase` in the
    obfuscated original; `DummyObject` (imported above) is the intended one.
    """

    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # NOTE(review): the two classmethod names were collapsed by
        # obfuscation; from_config/from_pretrained follow the diffusers
        # dummy-object convention — confirm against the original module.
        requires_backends(cls, ["note_seq"])
| 266 | 0 |
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__lowerCamelCase : List[str] = '''scheduler_config.json'''
class A_(Enum):
    """Identifiers for the Flax Karras-style schedulers (compatibles lookup).

    NOTE(review): the base class was the undefined `a_` (Enum, imported
    above, is the only sensible base for integer members) and all member
    names were collapsed to one identifier; member names restored per the
    diffusers Flax scheduler enum — confirm against the original module.
    """

    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """Base output for Flax scheduler `step` methods.

    NOTE(review): the obfuscated original had an undefined base and a field
    collapsed to `a__ = 42`; restored per the diffusers Flax scheduler
    output (`prev_sample`) — confirm against the original module.
    """

    # Sample at the previous diffusion timestep, x_{t-1}.
    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    """Base mixin for Flax schedulers: config-driven construction,
    (de)serialization, and discovery of compatible scheduler classes.

    NOTE(review): class attributes and method names were collapsed by
    obfuscation; restored per the diffusers FlaxSchedulerMixin — confirm.
    """

    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        """Instantiate a scheduler (and its state) from a saved config."""
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        # Flax schedulers keep mutable state outside the (frozen) scheduler.
        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        """Write the scheduler config to `save_directory` (optionally push)."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Scheduler classes this scheduler can be swapped with."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        # Resolve compatible class names against the top-level package module.
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x, shape) -> jnp.ndarray:
    """Broadcast `x` to `shape`, aligning x's axes with the LEADING axes.

    Standard numpy broadcasting aligns trailing axes; here trailing size-1
    axes are appended to `x` first, so its existing axes line up on the left.
    The function is called under this name further below; the obfuscated
    definition had lost it.
    """
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=0.999 ,__magic_name__=jnp.floataa )-> jnp.ndarray:
"""simple docstring"""
def alpha_bar(__magic_name__ ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
snake_case_ : str = []
for i in range(__magic_name__ ):
snake_case_ : Optional[Any] = i / num_diffusion_timesteps
snake_case_ : List[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(__magic_name__ ) / alpha_bar(__magic_name__ ) ,__magic_name__ ) )
return jnp.array(__magic_name__ ,dtype=__magic_name__ )
@flax.struct.dataclass
class CommonSchedulerState:
    """Precomputed beta/alpha tables shared by the Flax schedulers.

    NOTE(review): the three fields were collapsed to `a__ = 42` by
    obfuscation; restored per the diffusers CommonSchedulerState — confirm.
    """

    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        """Build the tables from a scheduler's config (beta schedule choice)."""
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"""
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod)
def get_sqrt_alpha_prod(state, original_samples, noise, timesteps):
    """Return sqrt(alpha_cumprod_t) and sqrt(1 - alpha_cumprod_t) for each
    timestep, broadcast (left-aligned) to the sample shape.

    `state` is a CommonSchedulerState; `timesteps` indexes its cumulative
    alpha table. Called by the noise/velocity helpers below; the obfuscated
    definition had lost this name.
    """
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state, original_samples, noise, timesteps):
    """Forward-diffuse: mix clean samples with noise at the given timesteps.

    Implements q(x_t | x_0) = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps.
    NOTE(review): all four module functions shared one obfuscated name and
    shadowed each other; names restored per the diffusers helpers — confirm.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state, sample, noise, timesteps):
    """Compute the v-prediction target:
    v = sqrt(abar_t) * eps - sqrt(1 - abar_t) * x.

    NOTE(review): name restored per the diffusers helpers (the obfuscated
    module gave all functions one shared name) — confirm.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 656 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    """Build the `accelerate test` argument parser.

    When `subparsers` is given, attach as the `test` subcommand and wire its
    default handler; otherwise return a standalone parser. The function is
    invoked under this name in `main()` below.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    """Run accelerate's bundled test script through `accelerate-launch`.

    Locates test_script.py relative to this file, forwards `--config_file`
    when given, and prints a success message on a zero return code. The
    function is invoked under this name in `main()` below.
    """
    # test_utils/scripts/test_script.py, two directories above this file.
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def main():
    """CLI entry point: parse arguments and run the accelerate env test.

    The `__main__` guard below calls `main()`, which the obfuscated
    definition did not provide; the name is restored here.
    """
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
| 656 | 1 |
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def lowercase_(grid, source, destination, allow_diagonal):
    """Dijkstra shortest path on a 0/1 grid (1 = passable) with unit costs.

    Returns ``(distance, path)`` where ``path`` lists the cells from source
    to destination inclusive, or ``(inf, [])`` when unreachable.
    NOTE(review): reconstructed from an obfuscated body (duplicate parameter
    names, destroyed tuple unpackings); the public name is kept as-is but
    should probably be renamed to `dijkstra` — confirm with callers. The
    original ``-> int`` annotation was wrong (a tuple is returned).
    """
    rows, cols = grid.shape
    # 4-connected moves; extend with the four diagonals on request.
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    # Predecessor cell for path reconstruction; dtype=object stores tuples.
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            # Walk predecessors back to the source to rebuild the path.
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                # Relax only passable cells that get a strictly shorter path.
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 299 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def lowercase__(name, val, spaces=0):
    """Recursively pretty-print a (possibly nested) checkpoint dict.

    Fixes the obfuscated original: the signature repeated `A` (a SyntaxError),
    locals `fmt`/`msg` were assigned to throwaway names, and the recursive
    call targeted the undefined `recursive_print` (now a self-call).

    Args:
        name: key to print, or None at the top level.
        val: dict (recursed into), torch.Tensor (size printed), or scalar.
        spaces: current indentation width, grown by 2 per nesting level.
    """
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            lowercase__(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def lowercase__(param, checkpoint_version, num_splits, num_heads, hidden_size):
    """Permute a Megatron QKV weight/bias into the layout expected downstream.

    Fixes the obfuscated original: the signature repeated `A` (a SyntaxError)
    and `input_shape`/`saved_shape` were assigned to throwaway names.
    """
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    # Versions < 1.0 fall through: the tensor is returned in its input layout.
    param = param.view(*input_shape)
    return param
def lowercase__( A , A , A ):
    """Convert a Megatron-LM GPT-2 checkpoint state dict into a transformers one.

    NOTE(review): this body is machine-obfuscated and currently broken — the
    signature repeats `A` (invalid Python) and the original assignment targets
    (including the keys written into the output state dict) were collapsed
    into throwaway locals while later lines still read the original names
    (`ds_args`, `config`, `model`, `lm`, ...). Kept byte-identical pending a
    faithful reconstruction from the upstream conversion script.
    """
    # The converted output model.
    snake_case__ : Optional[Any] = {}
    # old versions did not store training args
    snake_case__ : Any = input_state_dict.get('args' , A )
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        snake_case__ : str = ds_args.padded_vocab_size
        snake_case__ : Any = ds_args.max_position_embeddings
        snake_case__ : Optional[int] = ds_args.hidden_size
        snake_case__ : str = ds_args.num_layers
        snake_case__ : List[Any] = ds_args.num_attention_heads
        snake_case__ : int = ds_args.ffn_hidden_size
        # pprint(config)
    # The number of heads.
    snake_case__ : int = config.n_head
    # The hidden_size per head.
    snake_case__ : Any = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        snake_case__ : Optional[Any] = input_state_dict['checkpoint_version']
    else:
        snake_case__ : Tuple = 0.0
    # The model.
    snake_case__ : Dict = input_state_dict['model']
    # The language model.
    snake_case__ : int = model['language_model']
    # The embeddings.
    snake_case__ : Tuple = lm['embedding']
    # The word embeddings.
    snake_case__ : Tuple = embeddings['word_embeddings']['weight']
    # Truncate the embedding table to vocab_size rows.
    snake_case__ : int = word_embeddings[: config.vocab_size, :]
    snake_case__ : List[str] = word_embeddings
    # The position embeddings.
    snake_case__ : Union[str, Any] = embeddings['position_embeddings']['weight']
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    snake_case__ : Any = pos_embeddings.size(0 )
    if n_positions != config.n_positions:
        raise ValueError(
            f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
    # Store the position embeddings.
    snake_case__ : int = pos_embeddings
    # The transformer.
    snake_case__ : Optional[Any] = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
    # The regex to extract layer names.
    snake_case__ : Union[str, Any] = re.compile(R'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
    # The simple map of names for "automated" rules.
    snake_case__ : Any = {
        'attention.dense': '.attn.c_proj.',
        'self_attention.dense': '.attn.c_proj.',
        'mlp.dense_h_to_4h': '.mlp.c_fc.',
        'mlp.dense_4h_to_h': '.mlp.c_proj.',
    }
    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        snake_case__ : Dict = layer_re.match(A )
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        snake_case__ : Dict = int(m.group(1 ) )
        # The name of the operation.
        snake_case__ : List[Any] = m.group(2 )
        # Is it a weight or a bias?
        snake_case__ : Tuple = m.group(3 )
        # The name of the layer.
        snake_case__ : int = f'''transformer.h.{layer_idx}'''
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith('layernorm' ):
            snake_case__ : Union[str, Any] = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
            snake_case__ : List[Any] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            snake_case__ : List[Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
                1 , 1 , A , A )
            snake_case__ : List[Any] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            snake_case__ : Optional[int] = torch.tensor(-1e4 , dtype=torch.floataa )
            snake_case__ : int = masked_bias
            snake_case__ : List[Any] = fix_query_key_value_ordering(A , A , 3 , A , A )
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            snake_case__ : List[Any] = out_val.transpose(0 , 1 ).contiguous()
            # Store.
            snake_case__ : Optional[Any] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            snake_case__ : int = fix_query_key_value_ordering(A , A , 3 , A , A )
            # Store. No change of shape.
            snake_case__ : List[Any] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            snake_case__ : List[Any] = megatron_to_transformers[op_name]
            snake_case__ : Union[str, Any] = val.transpose(0 , 1 )
        # Copy the bias.
        elif weight_or_bias == "bias":
            snake_case__ : List[Any] = megatron_to_transformers[op_name]
            snake_case__ : str = val
    # DEBUG.
    assert config.n_layer == layer_idx + 1
    # The final layernorm.
    snake_case__ : Any = transformer['final_layernorm.weight']
    snake_case__ : Optional[int] = transformer['final_layernorm.bias']
    # For LM head, transformers' wants the matrix to weight embeddings.
    snake_case__ : Optional[Any] = word_embeddings
    # It should be done!
    return output_state_dict
def lowercase__( ):
    """CLI driver: load a Megatron GPT-2 checkpoint, convert it, and save the
    transformers config, tokenizer files, and `pytorch_model.bin`.

    NOTE(review): machine-obfuscated and currently broken — results of each
    step (`parser`, `args`, `input_state_dict`, `config`, ...) are assigned to
    throwaway locals but read back under their original names on the following
    lines. Kept byte-identical pending a faithful reconstruction.
    """
    # Create the argument parser.
    snake_case__ : str = argparse.ArgumentParser()
    parser.add_argument('--print-checkpoint-structure' , action='store_true' )
    parser.add_argument(
        'path_to_checkpoint' , type=A , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
    parser.add_argument(
        '--config_file' , default='' , type=A , help='An optional config json file describing the pre-trained model.' , )
    snake_case__ : Dict = parser.parse_args()
    # Extract the basename.
    snake_case__ : str = os.path.dirname(args.path_to_checkpoint )
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
    if args.path_to_checkpoint.endswith('.zip' ):
        with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
            with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
                snake_case__ : Any = torch.load(A , map_location='cpu' )
    else:
        snake_case__ : Optional[int] = torch.load(args.path_to_checkpoint , map_location='cpu' )
    snake_case__ : List[Any] = input_state_dict.get('args' , A )
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                snake_case__ : Optional[int] = 'gelu_fast'
            elif ds_args.openai_gelu:
                snake_case__ : Optional[Any] = 'gelu_new'
            else:
                snake_case__ : str = 'gelu'
        else:
            # in the very early days this used to be "gelu_new"
            snake_case__ : List[str] = 'gelu_new'
        # Spell out all parameters in case the defaults change.
        snake_case__ : str = GPTaConfig(
            vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=A , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=A , summary_activation=A , summary_proj_to_labels=A , summary_first_dropout=0.1 , scale_attn_weights=A , use_cache=A , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , )
    else:
        snake_case__ : str = GPTaConfig.from_json_file(args.config_file )
    snake_case__ : Optional[Any] = ['GPT2LMHeadModel']
    # Convert.
    print('Converting' )
    snake_case__ : Dict = convert_megatron_checkpoint(A , A , A )
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(A , A )
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        snake_case__ : Union[str, Any] = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            snake_case__ : Optional[int] = 'gpt2'
        elif tokenizer_type == "PretrainedFromHF":
            snake_case__ : Optional[Any] = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
    else:
        snake_case__ : str = 'gpt2'
    snake_case__ : int = AutoTokenizer.from_pretrained(A )
    snake_case__ : Optional[int] = type(A ).__name__
    snake_case__ : Tuple = tokenizer_class
    # Store the config to file.
    print('Saving config' )
    config.save_pretrained(A )
    # Save tokenizer based on args
    print(f'''Adding {tokenizer_class} tokenizer files''' )
    tokenizer.save_pretrained(A )
    # Store the state_dict to file.
    snake_case__ : Optional[Any] = os.path.join(A , 'pytorch_model.bin' )
    print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
    torch.save(A , A )
####################################################################################################
if __name__ == "__main__":
    # Fixed NameError: `main` is not defined in this file; the final binding of
    # the (obfuscated) `lowercase__` is the CLI driver defined just above.
    lowercase__()
####################################################################################################
| 170 | 0 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
    """Processor pairing a Wav2Vec2 feature extractor with a CTC tokenizer.

    NOTE(review): machine-obfuscated and currently broken in several places —
    the base name `__UpperCamelCase` appears undefined here (the file imports
    `ProcessorMixin`), both class attributes bind the same name `_A` (the
    second shadows the first), several signatures repeat a parameter name
    (invalid Python), results are assigned to the throwaway `snake_case__`
    but read back as `audio`/`text`/`inputs`/... , and all public methods
    share the name `A_` so only the last definition survives. Kept
    byte-identical pending a faithful reconstruction.
    """
    _A : str = 'Wav2Vec2FeatureExtractor'
    _A : List[str] = 'AutoTokenizer'
    def __init__( self , lowerCamelCase , lowerCamelCase ):
        # Delegates to the mixin, then mirrors the legacy "current processor" state.
        super().__init__(lowerCamelCase , lowerCamelCase )
        snake_case__ = self.feature_extractor
        snake_case__ = False
    @classmethod
    def A_ ( cls , lowerCamelCase , **lowerCamelCase ):
        """Load the processor; fall back to loading the two parts separately for
        configs predating the `tokenizer_class` attribute (deprecation warned)."""
        try:
            return super().from_pretrained(lowerCamelCase , **lowerCamelCase )
        except OSError:
            warnings.warn(
                F"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: " , lowerCamelCase , )
            snake_case__ = WavaVecaFeatureExtractor.from_pretrained(lowerCamelCase , **lowerCamelCase )
            snake_case__ = WavaVecaCTCTokenizer.from_pretrained(lowerCamelCase , **lowerCamelCase )
            return cls(feature_extractor=lowerCamelCase , tokenizer=lowerCamelCase )
    def __call__( self , *lowerCamelCase , **lowerCamelCase ):
        """Route `audio` to the feature extractor and `text` to the tokenizer,
        merging labels into the audio features when both are given."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*lowerCamelCase , **lowerCamelCase )
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
            snake_case__ = kwargs.pop("raw_speech" )
        else:
            snake_case__ = kwargs.pop("audio" , lowerCamelCase )
        snake_case__ = kwargs.pop("sampling_rate" , lowerCamelCase )
        snake_case__ = kwargs.pop("text" , lowerCamelCase )
        if len(lowerCamelCase ) > 0:
            snake_case__ = args[0]
            snake_case__ = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if audio is not None:
            snake_case__ = self.feature_extractor(lowerCamelCase , *lowerCamelCase , sampling_rate=lowerCamelCase , **lowerCamelCase )
        if text is not None:
            snake_case__ = self.tokenizer(lowerCamelCase , **lowerCamelCase )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            snake_case__ = encodings["input_ids"]
            return inputs
    def A_ ( self , *lowerCamelCase , **lowerCamelCase ):
        """Pad `input_features` via the feature extractor and `labels` via the
        tokenizer, merging label ids into the features when both are given."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*lowerCamelCase , **lowerCamelCase )
        snake_case__ = kwargs.pop("input_features" , lowerCamelCase )
        snake_case__ = kwargs.pop("labels" , lowerCamelCase )
        if len(lowerCamelCase ) > 0:
            snake_case__ = args[0]
            snake_case__ = args[1:]
        if input_features is not None:
            snake_case__ = self.feature_extractor.pad(lowerCamelCase , *lowerCamelCase , **lowerCamelCase )
        if labels is not None:
            snake_case__ = self.tokenizer.pad(lowerCamelCase , **lowerCamelCase )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            snake_case__ = labels["input_ids"]
            return input_features
    def A_ ( self , *lowerCamelCase , **lowerCamelCase ):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*lowerCamelCase , **lowerCamelCase )
    def A_ ( self , *lowerCamelCase , **lowerCamelCase ):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*lowerCamelCase , **lowerCamelCase )
    @contextmanager
    def A_ ( self ):
        """Deprecated: temporarily make the tokenizer the active processor so
        label text can be processed with plain calls."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call." )
        snake_case__ = True
        snake_case__ = self.tokenizer
        yield
        snake_case__ = self.feature_extractor
        snake_case__ = False
| 530 |
from math import factorial
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase = 100 ):
    """Return the sum of the decimal digits of __lowerCAmelCase! (Project Euler 20).

    Fixes a bug in the obfuscated original: the generator summed
    ``int(__lowerCAmelCase)`` (the argument) once per digit instead of
    ``int(x)`` (each digit character of the factorial).
    """
    return sum(int(x) for x in str(factorial(__lowerCAmelCase)))
if __name__ == "__main__":
    # Fixed NameError: the module defines `SCREAMING_SNAKE_CASE__`, not `solution`.
    print(SCREAMING_SNAKE_CASE__(int(input('''Enter the Number: ''').strip())))
| 530 | 1 |
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_UpperCamelCase = re.compile(R'''^(?P<major>\d+)''' R'''\.(?P<minor>\d+)''' R'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class _lowerCamelCase :
    """A comparable semantic-version value object built from a "x.y.z" string.

    NOTE(review): machine-obfuscated and partially broken — every field binds
    the same name `UpperCAmelCase_` (only one dataclass field survives),
    several methods share the name `UpperCAmelCase` (later definitions shadow
    earlier ones), the post-init parse drops its results into throwaway
    locals instead of setting major/minor/patch, and the helpers
    `_str_to_version_tuple`/`_version_tuple_to_str` are referenced under
    names this module never defines. Kept byte-identical.
    """
    UpperCAmelCase_ : str
    UpperCAmelCase_ : Optional[str] =None
    UpperCAmelCase_ : Optional[Union[str, int]] =None
    UpperCAmelCase_ : Optional[Union[str, int]] =None
    UpperCAmelCase_ : Optional[Union[str, int]] =None
    def UpperCAmelCase ( self ) -> Dict:
        '''Parse self.version_str into its three numeric components.'''
        __snake_case , __snake_case , __snake_case : List[Any] = _str_to_version_tuple(self.version_str )
    def __repr__( self ) -> int:
        '''Render the version as "major.minor.patch" from self.tuple.'''
        return F"""{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"""
    @property
    def UpperCAmelCase ( self ) -> int:
        '''Return (major, minor, patch) as a tuple.'''
        return self.major, self.minor, self.patch
    def UpperCAmelCase ( self , UpperCAmelCase ) -> Tuple:
        '''Coerce a string to a Version, pass a Version through, else TypeError.'''
        if isinstance(UpperCAmelCase , UpperCAmelCase ):
            return Version(UpperCAmelCase )
        elif isinstance(UpperCAmelCase , UpperCAmelCase ):
            return other
        raise TypeError(F"""{other} (type {type(UpperCAmelCase )}) cannot be compared to version.""" )
    def __eq__( self , UpperCAmelCase ) -> List[str]:
        '''Equal when the validated operand has the same version tuple.'''
        try:
            __snake_case : Optional[Any] = self._validate_operand(UpperCAmelCase )
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple
    def __lt__( self , UpperCAmelCase ) -> Optional[int]:
        '''Order by version tuple; total_ordering derives the rest.'''
        __snake_case : int = self._validate_operand(UpperCAmelCase )
        return self.tuple < other.tuple
    def __hash__( self ) -> int:
        '''Hash the canonical "x.y.z" string form of the tuple.'''
        return hash(_version_tuple_to_str(self.tuple ) )
    @classmethod
    def UpperCAmelCase ( cls , UpperCAmelCase ) -> str:
        '''Build an instance from a dict, ignoring unknown keys.'''
        __snake_case : Any = {f.name for f in dataclasses.fields(cls )}
        return cls(**{k: v for k, v in dic.items() if k in field_names} )
    def UpperCAmelCase ( self ) -> Dict:
        '''Return the raw version string.'''
        return self.version_str
def lowerCAmelCase__( lowercase ) -> tuple:
    """Parse an "x.y.z" version string into a tuple of three ints.

    Fixes three defects in the obfuscated original: it matched against the
    undefined `_VERSION_REG` (the pattern is bound to `_UpperCamelCase`),
    interpolated the undefined `version_str` in the error message, and
    converted the whole input (`int(lowercase)`) instead of each group.

    Raises:
        ValueError: if the string does not match "digits.digits.digits".
    """
    res = _UpperCamelCase.match(lowercase )
    if not res:
        raise ValueError(f"""Invalid version '{lowercase}'. Format should be x.y.z with {{x,y,z}} being digits.""" )
    return tuple(int(v ) for v in [res.group("major" ), res.group("minor" ), res.group("patch" )] )
def lowerCAmelCase__( lowercase ) -> str:
    """Join the components of a version tuple into an "x.y.z" string.

    Fixes the obfuscated original, which iterated the undefined
    `version_tuple` and stringified the whole argument instead of each part.
    """
    return ".".join(str(v ) for v in lowercase )
| 243 | """simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
SCREAMING_SNAKE_CASE__:Dict = logging.get_logger(__name__)
class snake_case__ ( DonutImageProcessor ):
    """Deprecated alias for `DonutImageProcessor`, kept for backward compatibility."""

    def __init__( self , *args , **kwargs ):
        """Emit a FutureWarning, then defer to DonutImageProcessor.__init__.

        Fixes the obfuscated original: `*lowerCamelCase, **lowerCamelCase`
        repeated one parameter name (a SyntaxError), the warning category was
        that bogus name instead of FutureWarning, and the base class
        `snake_case_` was never defined (the intended base,
        DonutImageProcessor, is imported above).
        """
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 528 | 0 |
import math
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
    """Return True iff lowerCAmelCase_ is prime (trial division over 6k +/- 1).

    Fixes the obfuscated original, whose body tested the undefined name
    `number` instead of the parameter.
    """
    if 1 < lowerCAmelCase_ < 4:
        # 2 and 3 are primes
        return True
    elif lowerCAmelCase_ < 2 or lowerCAmelCase_ % 2 == 0 or lowerCAmelCase_ % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(lowerCAmelCase_ ) + 1 ) , 6 ):
        if lowerCAmelCase_ % i == 0 or lowerCAmelCase_ % (i + 2) == 0:
            return False
    return True
def lowerCAmelCase ( lowerCAmelCase_ = 0.1 )-> int:
    """Project Euler 58: smallest odd spiral side length at which the fraction
    of primes on the diagonals drops below `lowerCAmelCase_`.

    Fixes the obfuscated original (undefined `primes`/`j`/`ratio`, and the
    primality test was called on the ratio argument instead of each corner).
    A local primality test is used because the sibling helper shares this
    function's obfuscated name and is shadowed by this very definition.
    """
    def _is_prime(n):
        # Trial division over 6k +/- 1 candidates.
        if 1 < n < 4:
            return True
        if n < 2 or n % 2 == 0 or n % 3 == 0:
            return False
        for i in range(5, int(math.sqrt(n) + 1), 6):
            if n % i == 0 or n % (i + 2) == 0:
                return False
        return True

    primes = 3  # primes among the diagonal values of the 3x3 spiral (3, 5, 7)
    j = 3  # current spiral side length
    while primes / (2 * j - 1) >= lowerCAmelCase_:
        # Corners of the next layer; (j + 2)**2 itself is a square, never prime.
        for candidate in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            if _is_prime(candidate):
                primes += 1
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod() | 718 |
# (value, symbol) pairs in descending order, including subtractive forms,
# consumed greedily by the int -> Roman converter below.
_UpperCAmelCase : Dict =[
    (1000, """M"""),
    (900, """CM"""),
    (500, """D"""),
    (400, """CD"""),
    (100, """C"""),
    (90, """XC"""),
    (50, """L"""),
    (40, """XL"""),
    (10, """X"""),
    (9, """IX"""),
    (5, """V"""),
    (4, """IV"""),
    (1, """I"""),
]
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
    """Convert a Roman-numeral string to an integer (handles subtractive pairs).

    Fixes the obfuscated original, whose body referenced the undefined names
    `vals`/`roman`/`total`/`place` after assigning them to throwaway locals.
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
    total = 0
    place = 0
    while place < len(lowerCAmelCase_ ):
        # Subtractive pair (e.g. "IV"): a smaller symbol precedes a larger one.
        if (place + 1 < len(lowerCAmelCase_ )) and (vals[lowerCAmelCase_[place]] < vals[lowerCAmelCase_[place + 1]]):
            total += vals[lowerCAmelCase_[place + 1]] - vals[lowerCAmelCase_[place]]
            place += 2
        else:
            total += vals[lowerCAmelCase_[place]]
            place += 1
    return total
def lowerCAmelCase ( lowerCAmelCase_ )-> str:
    """Convert a positive integer to a Roman-numeral string using the module-level
    ROMAN table (greedy, largest value first).

    Fixes the obfuscated original: `divmod` was called on the argument twice
    instead of (number, arabic), and `factor`/`number` were undefined.
    """
    result = []
    for arabic, roman in ROMAN:
        factor, lowerCAmelCase_ = divmod(lowerCAmelCase_ , arabic )
        result.append(roman * factor )
        if lowerCAmelCase_ == 0:
            break
    return "".join(lowerCAmelCase_ for lowerCAmelCase_ in result )
# Run the module doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
# NOTE(review): both constants below are bound to the same name `_UpperCamelCase`,
# so the first list (candidate summaries) is immediately shadowed by the second
# (reference summaries); the original distinct names were lost in obfuscation.
_UpperCamelCase = [
    'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
    ' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
    ' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
    'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
    ' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
    ' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
    ' body.',
    'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
    ' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
    ' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
    ' punishment.',
]
_UpperCamelCase = [
    'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
    ' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
    ' had informed his Lufthansa training school of an episode of severe depression, airline says .',
    'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
    ' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
    ' Israelis .',
    'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
    ' death . Organization claims that governments around the world are using the threat of terrorism to advance'
    ' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
    ' sentences up by 28% .',
]
def a_ ( ) -> Tuple:
    """Intended check: disaggregated ROUGE-2 f-measures average to the same value
    whether one or two rouge keys are requested.

    NOTE(review): `_lowerCAmelCase` (used as every argument) is never defined in
    this module — the original constant names were lost in obfuscation, and all
    six test functions share the name `a_`, so only the last binding survives.
    """
    __lowerCamelCase : Dict = calculate_rouge(_lowerCAmelCase ,_lowerCAmelCase ,bootstrap_aggregation=_lowerCAmelCase ,rouge_keys=['rouge2', 'rougeL'] )
    assert isinstance(_lowerCAmelCase ,_lowerCAmelCase )
    __lowerCamelCase : Any = calculate_rouge(_lowerCAmelCase ,_lowerCAmelCase ,bootstrap_aggregation=_lowerCAmelCase ,rouge_keys=['rouge2'] )
    assert (
        pd.DataFrame(no_aggregation['rouge2'] ).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra['rouge2'] ).fmeasure.mean()
    )
def a_ ( ) -> List[str]:
    """Intended check: rougeLsum with newline separation scores higher than without.

    NOTE(review): arguments were collapsed into the undefined `_lowerCAmelCase`
    and the scores into throwaway locals later read as `score`/`score_no_sep`.
    """
    __lowerCamelCase : str = 'rougeLsum'
    __lowerCamelCase : List[Any] = calculate_rouge(_lowerCAmelCase ,_lowerCAmelCase ,newline_sep=_lowerCAmelCase ,rouge_keys=[k] )[k]
    __lowerCamelCase : Tuple = calculate_rouge(_lowerCAmelCase ,_lowerCAmelCase ,newline_sep=_lowerCAmelCase ,rouge_keys=[k] )[k]
    assert score > score_no_sep
def a_ ( ) -> Optional[int]:
    """Intended check: newline separation does not change rouge1/rouge2/rougeL.

    NOTE(review): `_lowerCAmelCase` is undefined in this module (see note on the
    first test function); kept byte-identical.
    """
    __lowerCamelCase : int = ['rouge1', 'rouge2', 'rougeL']
    __lowerCamelCase : Union[str, Any] = calculate_rouge(_lowerCAmelCase ,_lowerCAmelCase ,newline_sep=_lowerCAmelCase ,rouge_keys=_lowerCAmelCase )
    __lowerCamelCase : Union[str, Any] = calculate_rouge(_lowerCAmelCase ,_lowerCAmelCase ,newline_sep=_lowerCAmelCase ,rouge_keys=_lowerCAmelCase )
    assert score_sep == score_no_sep
def a_ ( ) -> Tuple:
    """Intended check: single-sentence inputs score the same with and without
    newline separation.

    NOTE(review): `_lowerCAmelCase` is undefined in this module (see note on the
    first test function); kept byte-identical.
    """
    __lowerCamelCase : Optional[int] = [
        'Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.',
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    __lowerCamelCase : List[str] = [
        'Margot Frank, died in 1945, a month earlier than previously thought.',
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        ' the final seconds on board Flight 9525.',
    ]
    assert calculate_rouge(_lowerCAmelCase ,_lowerCAmelCase ,newline_sep=_lowerCAmelCase ) == calculate_rouge(_lowerCAmelCase ,_lowerCAmelCase ,newline_sep=_lowerCAmelCase )
def a_ ( ) -> Optional[Any]:
    """Intended check: rougeLsum improves when "<n>" sentence markers are honored.

    NOTE(review): `_lowerCAmelCase` is undefined in this module and the two
    scores were collapsed into throwaway locals later read as
    `new_score`/`prev_score`; kept byte-identical.
    """
    __lowerCamelCase : Tuple = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    __lowerCamelCase : str = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
    ]
    __lowerCamelCase : Union[str, Any] = calculate_rouge(_lowerCAmelCase ,_lowerCAmelCase ,rouge_keys=['rougeLsum'] ,newline_sep=_lowerCAmelCase )['rougeLsum']
    __lowerCamelCase : str = calculate_rouge(_lowerCAmelCase ,_lowerCAmelCase ,rouge_keys=['rougeLsum'] )['rougeLsum']
    assert new_score > prev_score
def a_ ( ) -> Any:
    """Intended check: `calculate_rouge_path` works on the wmt_en_ro fixture files,
    both aggregated and not.

    NOTE(review): `_lowerCAmelCase` is undefined in this module (see note on the
    first test function); kept byte-identical.
    """
    __lowerCamelCase : Dict = Path('examples/seq2seq/test_data/wmt_en_ro' )
    __lowerCamelCase : int = calculate_rouge_path(data_dir.joinpath('test.source' ) ,data_dir.joinpath('test.target' ) )
    assert isinstance(_lowerCAmelCase ,_lowerCAmelCase )
    __lowerCamelCase : List[Any] = calculate_rouge_path(
        data_dir.joinpath('test.source' ) ,data_dir.joinpath('test.target' ) ,bootstrap_aggregation=_lowerCAmelCase )
    assert isinstance(_lowerCAmelCase ,_lowerCAmelCase )
| 459 |
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self : Dict , _a : int , _a : Any=13 , _a : Union[str, Any]=7 , _a : int=True , _a : Optional[int]=True , _a : Union[str, Any]=True , _a : Dict=True , _a : Optional[Any]=99 , _a : List[str]=32 , _a : Union[str, Any]=5 , _a : Optional[Any]=4 , _a : Dict=37 , _a : Dict="gelu" , _a : Optional[int]=0.1 , _a : Tuple=0.1 , _a : Dict=512 , _a : Tuple=16 , _a : List[str]=2 , _a : Union[str, Any]=0.02 , _a : List[Any]=False , _a : str=True , _a : List[Any]="None" , _a : Dict=3 , _a : Union[str, Any]=4 , _a : Union[str, Any]=None , ) -> List[str]:
__lowerCamelCase : Any = parent
__lowerCamelCase : List[Any] = batch_size
__lowerCamelCase : str = seq_length
__lowerCamelCase : Dict = is_training
__lowerCamelCase : str = use_input_mask
__lowerCamelCase : Any = use_token_type_ids
__lowerCamelCase : Dict = use_labels
__lowerCamelCase : str = vocab_size
__lowerCamelCase : Optional[Any] = hidden_size
__lowerCamelCase : Union[str, Any] = num_hidden_layers
__lowerCamelCase : Union[str, Any] = num_attention_heads
__lowerCamelCase : str = intermediate_size
__lowerCamelCase : Union[str, Any] = hidden_act
__lowerCamelCase : Tuple = hidden_dropout_prob
__lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
__lowerCamelCase : Optional[Any] = max_position_embeddings
__lowerCamelCase : Tuple = type_vocab_size
__lowerCamelCase : Dict = type_sequence_label_size
__lowerCamelCase : str = initializer_range
__lowerCamelCase : Tuple = num_labels
__lowerCamelCase : Optional[Any] = num_choices
__lowerCamelCase : Any = relative_attention
__lowerCamelCase : Any = position_biased_input
__lowerCamelCase : Dict = pos_att_type
__lowerCamelCase : Any = scope
def _lowercase ( self : Dict ) -> str:
__lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase : Optional[Any] = None
if self.use_input_mask:
__lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__lowerCamelCase : Optional[Any] = None
if self.use_token_type_ids:
__lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase : Any = None
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : Tuple = None
if self.use_labels:
__lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase : str = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : List[str] ) -> Optional[int]:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _lowercase ( self : Any , _a : Optional[Any] ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _lowercase ( self : List[str] , _a : Any , _a : List[str] , _a : List[Any] , _a : Dict , _a : Union[str, Any] , _a : Tuple , _a : List[Any] ) -> List[str]:
__lowerCamelCase : str = DebertaVaModel(config=_a )
model.to(_a )
model.eval()
__lowerCamelCase : Optional[Any] = model(_a , attention_mask=_a , token_type_ids=_a )[0]
__lowerCamelCase : List[Any] = model(_a , token_type_ids=_a )[0]
__lowerCamelCase : int = model(_a )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _lowercase ( self : Union[str, Any] , _a : str , _a : Optional[int] , _a : Dict , _a : Optional[int] , _a : Dict , _a : Optional[Any] , _a : Optional[Any] ) -> str:
__lowerCamelCase : List[Any] = DebertaVaForMaskedLM(config=_a )
model.to(_a )
model.eval()
__lowerCamelCase : Optional[int] = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : Optional[Any] , _a : List[str] , _a : str , _a : Any , _a : Dict , _a : Optional[Any] , _a : Optional[Any] , _a : str ) -> List[Any]:
__lowerCamelCase : List[str] = self.num_labels
__lowerCamelCase : List[str] = DebertaVaForSequenceClassification(_a )
model.to(_a )
model.eval()
__lowerCamelCase : Tuple = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(_a )
def _lowercase ( self : int , _a : List[str] , _a : Union[str, Any] , _a : Any , _a : int , _a : Optional[int] , _a : Tuple , _a : Tuple ) -> Union[str, Any]:
__lowerCamelCase : Union[str, Any] = self.num_labels
__lowerCamelCase : Union[str, Any] = DebertaVaForTokenClassification(config=_a )
model.to(_a )
model.eval()
__lowerCamelCase : str = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self : Union[str, Any] , _a : Optional[int] , _a : List[Any] , _a : Optional[Any] , _a : List[str] , _a : Optional[int] , _a : Union[str, Any] , _a : Any ) -> Tuple:
__lowerCamelCase : int = DebertaVaForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
__lowerCamelCase : List[str] = model(
_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self : Union[str, Any] , _a : Any , _a : List[str] , _a : str , _a : str , _a : List[Any] , _a : Tuple , _a : str ) -> Optional[int]:
__lowerCamelCase : Union[str, Any] = DebertaVaForMultipleChoice(config=_a )
model.to(_a )
model.eval()
__lowerCamelCase : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase : Optional[int] = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self : Any ) -> Any:
__lowerCamelCase : Tuple = self.prepare_config_and_inputs()
(
(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,
) : Optional[int] = config_and_inputs
__lowerCamelCase : List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common (shape/config) tests for the DeBERTa-v2 model family.

    The corrupted source listed the same base class twice (a TypeError at class
    creation), named every class attribute ``a_`` (each shadowing the previous)
    and every method ``_lowercase`` (only the last would survive). Conventional
    names are restored so the mixins can find the attributes they rely on.
    NOTE(review): base classes assumed to be ModelTesterMixin / PipelineTesterMixin
    from the module-level imports (not visible in this chunk) — confirm.
    """

    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    # NOTE(review): flag names assumed from the upstream DeBERTa-v2 test; the
    # corrupted source only preserved the values (True, False, False, False, False).
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        # NOTE(review): config_class assumed to be DebertaVaConfig from the
        # module-level imports — confirm.
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the pretrained microsoft/deberta-v2-xlarge checkpoint.

    Renamed from the corrupted ``lowerCamelCase_``, which collided with (and
    shadowed) the common test class of the same name defined just above.
    """

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 459 | 1 |
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Return True if *number* is prime, using 6k +/- 1 trial division.

    The corrupted source named the function ``UpperCAmelCase_`` and its
    parameter ``__UpperCamelCase`` while the body (and the caller at the
    generator) used ``number`` / ``is_prime``; consistent names are restored.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1, so it suffices to
    # test divisors 5, 7, 11, 13, ... up to sqrt(number).
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    """Yield the primes 2, 3, 5, 7, ... indefinitely.

    Restored from a corrupted version that assigned the counter to one name
    and incremented/tested another (a NameError at first iteration).
    """
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes strictly below *n* (Project Euler 10).

    The corrupted lambda referenced undefined names ``x``/``n``; it now
    compares each generated prime against the *n* parameter, and the function
    is renamed ``solution`` to match the ``__main__`` caller.
    """
    # takewhile stops the infinite prime generator at the first prime >= n.
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
    # Print the sum of all primes below two million (Project Euler problem 10).
    print(f"""{solution() = }""")
| 710 |
def UpperCAmelCase_(density: float, bulk_modulus: float) -> float:
    """Return the speed of sound in a fluid: c = sqrt(K / rho).

    Args:
        density: fluid density rho in kg/m^3; must be positive.
        bulk_modulus: bulk modulus K in Pa; must be positive.

    Raises:
        ValueError: if either quantity is non-positive.

    The corrupted source declared both parameters with the same name (a
    SyntaxError); descriptive names matching the body are restored.
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 588 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the Funnel slow and fast tokenizers.

    Restored from a corrupted version whose base class name was undefined,
    whose methods all shared one name (shadowing each other), and whose
    method bodies referenced parameter names that no longer existed.
    NOTE(review): attribute names assumed from the TokenizerTesterMixin
    contract — confirm against the mixin.
    """

    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        # Minimal WordPiece vocabulary sufficient for the assertions below.
        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                inputs = tokenizer("UNwant\u00E9d,running")
                # Funnel prepends a <cls> token whose token type id is 2.
                sentence_len = len(inputs["input_ids"]) - 1
                self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

                inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
                self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQ model's ``encode`` method.

    Renamed from the corrupted ``snake_case`` (which also collided with the
    model class below) because the model constructs ``VQEncoderOutput(latents=...)``;
    the field is likewise renamed from ``_A`` to ``latents`` to match that call.
    """

    # Encoded (pre-quantization) latents from the encoder + quant_conv.
    latents: torch.FloatTensor
class snake_case(ModelMixin, ConfigMixin):
    """VQ-VAE style model: encoder -> vector quantizer -> decoder.

    Restored from a corrupted version with duplicate parameter names in every
    signature (SyntaxErrors), a non-existent ``nn.Convad`` layer, duplicate
    base classes, and an undefined ``quant`` local in ``decode``. Method names
    ``encode``/``decode``/``forward`` are restored — ``forward`` in particular
    is required for nn.Module call semantics.
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        # Quantizer operates in vq_embed_dim space (defaults to latent_channels).
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        """Encode *x* into (pre-quantization) latents."""
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        """Quantize latents *h* (unless forced off) and decode them to a sample."""
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        # Spatial norm conditions the decoder on the quantized latents.
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        """Full autoencoding pass: encode, quantize, decode."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Select the tensor framework for `return_tensors=` based on what is installed.
# The corrupted source assigned an obfuscated name while the tests read FRAMEWORK.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class A_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the byte-level Perceiver tokenizer.

    Restored from a corrupted version with duplicate parameter names
    (SyntaxErrors), locals assigned to one name but read under another, and
    every method sharing the name ``lowercase_`` (shadowing each other).
    """

    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        # Keep only ids whose decoding is plain ASCII letters/spaces and that
        # round-trip through encode() to exactly themselves.
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks

        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 1_16, 1_11, 1_05, 1_17, 1_06, 1_07, 38, 2_32, 1_36, 1_78, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 1_07, 38, 2_01, 1_74, 38, 2_01, 1_75, 38, 2_01, 1_76, 38, 2_01, 1_77, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 1_14, 1_17, 1_16, 1_09, 38, 1_18, 1_03, 1_20, 1_03, 1_09, 1_20, 1_03, 1_18, 1_10, 38, 1_08, 1_17, 1_20, 38, 1_21, 1_23, 1_15, 1_15, 1_03, 1_20, 1_11, 1_28, 1_03, 1_22, 1_11, 1_17, 1_16, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(1_25)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([1_78]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
| 509 |
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Optional[Any] = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Convert slow tokenizer checkpoints to their fast (tokenizers-backed) format.

    Args:
        tokenizer_name: a key of ``TOKENIZER_CLASSES`` or ``None`` for all of them.
        checkpoint_name: a specific checkpoint, or ``None`` for every canonical one.
        dump_path: output directory for the generated fast tokenizer files.
        force_download: re-download checkpoints instead of using the cache.

    The corrupted source declared four identically named parameters (a
    SyntaxError) and mismatched locals; the conventional names are restored,
    and the function is renamed to match the ``__main__`` call site.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            # Keep only the fast tokenizer.json output.
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    # CLI entry point: the corrupted source assigned the parser and the parsed
    # args to an obfuscated name while the code below reads `parser` / `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 509 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase_ ( __a , unittest.TestCase ):
lowerCAmelCase__ = ConsistencyModelPipeline
lowerCAmelCase__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowerCAmelCase__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
lowerCAmelCase__ = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
] )
@property
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' , subfolder='''test_unet''' , )
return unet
@property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , )
return unet
def lowercase_ ( self : Optional[Any] , _A : int=False ):
'''simple docstring'''
if class_cond:
UpperCAmelCase__ : Union[str, Any] = self.dummy_cond_unet
else:
UpperCAmelCase__ : Any = self.dummy_uncond_unet
# Default to CM multistep sampler
UpperCAmelCase__ : int = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
UpperCAmelCase__ : Any = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def lowercase_ ( self : Tuple , _A : str , _A : List[Any]=0 ):
'''simple docstring'''
if str(_A ).startswith('''mps''' ):
UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(_A )
else:
UpperCAmelCase__ : int = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase__ : Tuple = {
'''batch_size''': 1,
'''num_inference_steps''': None,
'''timesteps''': [22, 0],
'''generator''': generator,
'''output_type''': '''np''',
}
return inputs
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : int = self.get_dummy_components()
UpperCAmelCase__ : str = ConsistencyModelPipeline(**_A )
UpperCAmelCase__ : Optional[int] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : Optional[int] = self.get_dummy_inputs(_A )
UpperCAmelCase__ : int = pipe(**_A ).images
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase__ : Any = image[0, -3:, -3:, -1]
UpperCAmelCase__ : int = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : List[str] = self.get_dummy_components(class_cond=_A )
UpperCAmelCase__ : List[Any] = ConsistencyModelPipeline(**_A )
UpperCAmelCase__ : str = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : List[str] = self.get_dummy_inputs(_A )
UpperCAmelCase__ : Tuple = 0
UpperCAmelCase__ : Dict = pipe(**_A ).images
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
UpperCAmelCase__ : int = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : Union[str, Any] = self.get_dummy_components()
UpperCAmelCase__ : Any = ConsistencyModelPipeline(**_A )
UpperCAmelCase__ : Optional[Any] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : Optional[int] = self.get_dummy_inputs(_A )
UpperCAmelCase__ : List[str] = 1
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : Any = pipe(**_A ).images
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase__ : Optional[int] = image[0, -3:, -3:, -1]
UpperCAmelCase__ : Any = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def lowercase_ ( self : Dict ):
        """One-step sampling smoke test with class-conditional components.

        NOTE(review): `_A` placeholders and mismatched result names mean this
        cannot run as written; see the note on the sibling tests above.
        """
        UpperCAmelCase__ : List[Any] = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase__ : int = self.get_dummy_components(class_cond=_A )
        UpperCAmelCase__ : Union[str, Any] = ConsistencyModelPipeline(**_A )
        UpperCAmelCase__ : int = pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase__ : Optional[Any] = self.get_dummy_inputs(_A )
        UpperCAmelCase__ : Tuple = 1  # single inference step (one-step generation)
        UpperCAmelCase__ : List[Any] = None  # no explicit timestep schedule
        UpperCAmelCase__ : str = 0  # class label for the conditional UNet
        UpperCAmelCase__ : str = pipe(**_A ).images
        assert image.shape == (1, 32, 32, 3)
        UpperCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
        UpperCAmelCase__ : Tuple = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow, GPU-only integration tests for ConsistencyModelPipeline against
    the public `diffusers/consistency_models` imagenet64 checkpoint.

    NOTE(review): `_A` stands in for every lost argument name and several
    result names no longer match their assignments, so these tests cannot run
    until the original identifiers are restored.
    """

    def lowercase_ ( self : Tuple ):
        """tearDown-style hook: release GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase_ ( self : List[Any] , _A : List[str]=0 , _A : int=False , _A : List[str]="cpu" , _A : Union[str, Any]=torch.floataa , _A : List[str]=(1, 3, 64, 64) ):
        """Common pipeline kwargs: seeded generator plus an explicit two-step
        timestep schedule ([22, 0]) instead of `num_inference_steps`.

        NOTE(review): the parameters are all named `_A`, which is a
        SyntaxError; presumably they were
        (seed, get_fixed_latents, device, dtype, shape) — TODO confirm.
        """
        UpperCAmelCase__ : Tuple = torch.manual_seed(_A )
        UpperCAmelCase__ : int = {
            '''num_inference_steps''': None,
            '''timesteps''': [22, 0],
            '''class_labels''': 0,
            '''generator''': generator,
            '''output_type''': '''np''',
        }
        if get_fixed_latents:
            # Pin the initial noise so fp16/flash-attention runs stay comparable.
            UpperCAmelCase__ : Any = self.get_fixed_latents(seed=_A , device=_A , dtype=_A , shape=_A )
            UpperCAmelCase__ : Optional[Any] = latents
        return inputs

    def lowercase_ ( self : List[str] , _A : Tuple=0 , _A : int="cpu" , _A : Optional[int]=torch.floataa , _A : List[str]=(1, 3, 64, 64) ):
        """Deterministic initial latents for a given seed/device/dtype/shape."""
        if type(_A ) == str:
            UpperCAmelCase__ : int = torch.device(_A )
        UpperCAmelCase__ : Union[str, Any] = torch.Generator(device=_A ).manual_seed(_A )
        UpperCAmelCase__ : List[str] = randn_tensor(_A , generator=_A , device=_A , dtype=_A )
        return latents

    def lowercase_ ( self : str ):
        """Multistep sampling checked against recorded reference pixel values."""
        UpperCAmelCase__ : Any = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
        UpperCAmelCase__ : Dict = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        UpperCAmelCase__ : Any = ConsistencyModelPipeline(unet=_A , scheduler=_A )
        pipe.to(torch_device=_A )
        pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase__ : str = self.get_inputs()
        UpperCAmelCase__ : List[Any] = pipe(**_A ).images
        assert image.shape == (1, 64, 64, 3)
        UpperCAmelCase__ : Optional[int] = image[0, -3:, -3:, -1]
        UpperCAmelCase__ : Optional[int] = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
        # Looser tolerance (2e-2) than the CPU fast tests: real GPU kernels vary.
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2

    def lowercase_ ( self : Union[str, Any] ):
        """One-step (distilled) sampling checked against recorded values."""
        UpperCAmelCase__ : Optional[Any] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
        UpperCAmelCase__ : Tuple = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        UpperCAmelCase__ : Any = ConsistencyModelPipeline(unet=_A , scheduler=_A )
        pipe.to(torch_device=_A )
        pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase__ : str = self.get_inputs()
        UpperCAmelCase__ : Optional[Any] = 1
        UpperCAmelCase__ : Tuple = None
        UpperCAmelCase__ : str = pipe(**_A ).images
        assert image.shape == (1, 64, 64, 3)
        UpperCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
        UpperCAmelCase__ : Union[str, Any] = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2

    @require_torch_a
    def lowercase_ ( self : Union[str, Any] ):
        """Multistep fp16 sampling under torch 2.0 flash attention, using fixed
        latents so results are comparable across attention backends."""
        UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
        UpperCAmelCase__ : Optional[int] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        UpperCAmelCase__ : List[Any] = ConsistencyModelPipeline(unet=_A , scheduler=_A )
        pipe.to(torch_device=_A , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase__ : Any = self.get_inputs(get_fixed_latents=_A , device=_A )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=_A , enable_math=_A , enable_mem_efficient=_A ):
            UpperCAmelCase__ : Tuple = pipe(**_A ).images
        assert image.shape == (1, 64, 64, 3)
        UpperCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
        UpperCAmelCase__ : str = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    @require_torch_a
    def lowercase_ ( self : Any ):
        """One-step fp16 sampling under torch 2.0 flash attention with fixed latents."""
        UpperCAmelCase__ : str = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
        UpperCAmelCase__ : Tuple = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        UpperCAmelCase__ : int = ConsistencyModelPipeline(unet=_A , scheduler=_A )
        pipe.to(torch_device=_A , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase__ : Optional[Any] = self.get_inputs(get_fixed_latents=_A , device=_A )
        UpperCAmelCase__ : Dict = 1
        UpperCAmelCase__ : str = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=_A , enable_math=_A , enable_mem_efficient=_A ):
            UpperCAmelCase__ : Dict = pipe(**_A ).images
        assert image.shape == (1, 64, 64, 3)
        UpperCAmelCase__ : Optional[int] = image[0, -3:, -3:, -1]
        UpperCAmelCase__ : Dict = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 75 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def A ( checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size ):
    """Convert an original LUKE checkpoint into HF Transformers format and
    sanity-check the converted model's hidden states.

    Args mirror the CLI flags below: path to pytorch_model.bin, path to
    metadata.json, path to entity_vocab.tsv, output folder, and the model
    size ("base" or "large").

    Fixes vs. the previous revision:
      * all five parameters were named `_A` (a SyntaxError) — restored;
      * the entity hidden-state shape guard used `!=`, inverting the check
        relative to the word hidden-state guard — now `==`.
    NOTE(review): `load_entity_vocab` is called by its original name; the
    loader defined below was renamed to `A` and should be restored.
    """
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_one = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_two = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_one, entity_token_two]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens: reuse the "@" and "#"
    # rows for the two entity markers appended to the word-embedding matrix.
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    enta_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, enta_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    # from the plain query weights (the w2e/e2w/e2e projections start as copies).
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs on a reference sentence with one entity span.
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1_024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1_024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    # `==` here (was `!=`): raise when the shape does NOT match expectations.
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def A ( entity_vocab_path ):
    """Read a TSV entity vocabulary (one "<title>\\t<count>" row per line) into
    a {title: row_index} dict.

    Fixes the previous revision, which enumerated the *path string* itself
    (`enumerate(_A)`) instead of the opened file handle, so it iterated over
    single characters rather than lines.
    """
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8" ) as f:
        for index, line in enumerate(f ):
            # Only the title (first column) is kept; its value is the row index.
            title, _ = line.rstrip().split("\t" )
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
    # Command-line entry point for the LUKE conversion script.
    # NOTE(review): the parser/args results below are bound to the
    # `__UpperCAmelCase` placeholders while the code reads `parser`/`args`,
    # and `convert_luke_checkpoint` does not exist in this module (the
    # conversion function above is named `A`) — this block cannot run as-is.
    __UpperCAmelCase : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
    parser.add_argument(
        '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
    )
    parser.add_argument(
        '--entity_vocab_path',
        default=None,
        type=str,
        help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
    )
    parser.add_argument(
        '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
    )
    __UpperCAmelCase : Optional[int] = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 584 | 0 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase=False):
try:
UpperCamelCase_ = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCamelCase_ = default
else:
# KEY is set, convert it to True or False.
try:
UpperCamelCase_ = strtobool(_lowerCAmelCase)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"""If set, {key} must be yes or no.""")
return _value
# Global test-suite switches, read once from the environment at import time.
# NOTE(review): the decorators further down read `_run_slow_tests`,
# `_run_remote_tests`, `_run_local_tests` and `_run_packaged_tests`, but the
# results below are bound to `UpperCAmelCase` placeholders; in addition,
# `parse_flag_from_env` itself was renamed to `_lowerCAmelCase` above, so
# these calls reference an undefined name.
UpperCAmelCase : Dict =parse_flag_from_env("""RUN_SLOW""", default=False)
UpperCAmelCase : List[Any] =parse_flag_from_env("""RUN_REMOTE""", default=False)
UpperCAmelCase : Dict =parse_flag_from_env("""RUN_LOCAL""", default=True)
UpperCAmelCase : Tuple =parse_flag_from_env("""RUN_PACKAGED""", default=True)

# Compression
# Markers that skip a test when an optional compression backend is missing.
UpperCAmelCase : Optional[int] =pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
UpperCAmelCase : Any =pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
UpperCAmelCase : Union[str, Any] =pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")

# Audio: requires a soundfile new enough to bundle a recent sndfile.
UpperCAmelCase : int =pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
    reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """,
)

# Beam
UpperCAmelCase : Optional[int] =pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
    reason="""test requires apache-beam and a compatible dill version""",
)

# Dill-cloudpickle compatibility
UpperCAmelCase : Union[str, Any] =pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("""0.3.2"""),
    reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)

# Windows
UpperCAmelCase : Union[str, Any] =pytest.mark.skipif(
    sys.platform == """win32""",
    reason="""test should not be run on Windows""",
)
def _lowerCAmelCase (_lowerCAmelCase):
try:
import faiss # noqa
except ImportError:
UpperCamelCase_ = unittest.skip("test requires faiss")(_lowerCAmelCase)
return test_case
def _lowerCAmelCase (_lowerCAmelCase):
try:
import regex # noqa
except ImportError:
UpperCamelCase_ = unittest.skip("test requires regex")(_lowerCAmelCase)
return test_case
def _lowerCAmelCase (_lowerCAmelCase):
try:
import elasticsearch # noqa
except ImportError:
UpperCamelCase_ = unittest.skip("test requires elasticsearch")(_lowerCAmelCase)
return test_case
def _lowerCAmelCase (_lowerCAmelCase):
try:
import sqlalchemy # noqa
except ImportError:
UpperCamelCase_ = unittest.skip("test requires sqlalchemy")(_lowerCAmelCase)
return test_case
def _lowerCAmelCase (test_case):
    """Decorator: skip `test_case` unless PyTorch is available per `datasets.config`.

    Fixes the previous revision, which discarded the skip wrapper and returned
    the undefined name `test_case`.
    """
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case
def _lowerCAmelCase (test_case):
    """Decorator: skip `test_case` unless TensorFlow is available per `datasets.config`.

    Fixes the previous revision, which discarded the skip wrapper and returned
    the undefined name `test_case`.
    """
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case
def _lowerCAmelCase (test_case):
    """Decorator: skip `test_case` unless JAX is available per `datasets.config`.

    Fixes the previous revision, which discarded the skip wrapper and returned
    the undefined name `test_case`.
    """
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case
def _lowerCAmelCase (test_case):
    """Decorator: skip `test_case` unless Pillow is available per `datasets.config`.

    Fixes the previous revision, which discarded the skip wrapper and returned
    the undefined name `test_case`.
    """
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case
def _lowerCAmelCase (_lowerCAmelCase):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("test requires transformers")(_lowerCAmelCase)
else:
return test_case
def _lowerCAmelCase (_lowerCAmelCase):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("test requires tiktoken")(_lowerCAmelCase)
else:
return test_case
def _lowerCAmelCase (_lowerCAmelCase):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("test requires spacy")(_lowerCAmelCase)
else:
return test_case
def _lowerCAmelCase (_lowerCAmelCase):
def _require_spacy_model(_lowerCAmelCase):
try:
import spacy # noqa F401
spacy.load(_lowerCAmelCase)
except ImportError:
return unittest.skip("test requires spacy")(_lowerCAmelCase)
except OSError:
return unittest.skip("test requires spacy model '{}'".format(_lowerCAmelCase))(_lowerCAmelCase)
else:
return test_case
return _require_spacy_model
def _lowerCAmelCase (_lowerCAmelCase):
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("test requires pyspark")(_lowerCAmelCase)
else:
return test_case
def _lowerCAmelCase (_lowerCAmelCase):
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("test requires joblibspark")(_lowerCAmelCase)
else:
return test_case
def _lowerCAmelCase (test_case):
    """Decorator: skip `test_case` unless slow tests are enabled (RUN_SLOW).

    Fixes the previous revision, which discarded the skip wrapper and returned
    the undefined name `test_case`.
    NOTE(review): `_run_slow_tests` is read here, but the module binds the
    parsed flag to an `UpperCAmelCase` placeholder — restore the flag name.
    """
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case
def _lowerCAmelCase (test_case):
    """Decorator: skip `test_case` unless local tests are enabled (RUN_LOCAL).

    Fixes the previous revision, which discarded the skip wrapper and returned
    the undefined name `test_case`.
    NOTE(review): `_run_local_tests` is read here, but the module binds the
    parsed flag to an `UpperCAmelCase` placeholder — restore the flag name.
    """
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case
def _lowerCAmelCase (test_case):
    """Decorator: skip `test_case` unless packaged tests are enabled (RUN_PACKAGED).

    Fixes the previous revision, which discarded the skip wrapper and returned
    the undefined name `test_case`.
    NOTE(review): `_run_packaged_tests` is read here, but the module binds the
    parsed flag to an `UpperCAmelCase` placeholder — restore the flag name.
    """
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case
def _lowerCAmelCase (test_case):
    """Decorator: skip `test_case` unless remote tests are enabled (RUN_REMOTE).

    Fixes the previous revision, which discarded the skip wrapper and returned
    the undefined name `test_case`.
    NOTE(review): `_run_remote_tests` is read here, but the module binds the
    parsed flag to an `UpperCAmelCase` placeholder — restore the flag name.
    """
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case
def _lowerCAmelCase (*_lowerCAmelCase):
def decorate(cls):
for name, fn in cls.__dict__.items():
if callable(_lowerCAmelCase) and name.startswith("test"):
for decorator in decorators:
UpperCamelCase_ = decorator(_lowerCAmelCase)
setattr(cls , _lowerCAmelCase , _lowerCAmelCase)
return cls
return decorate
class _lowercase (a_ ):
    """Raised when an offline-mode request would hang because no timeout was set.

    NOTE(review): referenced elsewhere in this module as
    `RequestWouldHangIndefinitelyError`; the base `a_` is undefined —
    presumably it was `Exception`. TODO confirm and restore both names.
    """

    pass
class _lowercase (a_ ):
'''simple docstring'''
lowercase__ = 0
lowercase__ = 1
lowercase__ = 2
@contextmanager
def _lowerCAmelCase (mode=OfflineSimulationMode.CONNECTION_FAILS , timeout=1e-16):
    """Simulate an offline environment for the duration of the `with` block.

    `mode` selects the failure style:
      * CONNECTION_FAILS: every `requests` send raises ConnectionError;
      * CONNECTION_TIMES_OUT: requests are redirected to a non-routable
        address with a tiny `timeout`, so they fail fast;
      * HF_DATASETS_OFFLINE_SET_TO_1: only flips `datasets.config.HF_DATASETS_OFFLINE`.

    Fixes the previous revision, in which this function's two parameters and
    every parameter of the inner callbacks shared the name `_lowerCAmelCase`
    (a SyntaxError) and the bodies read undefined names.
    NOTE(review): `OfflineSimulationMode` and `RequestWouldHangIndefinitelyError`
    are referenced exactly as in the original; the classes above were renamed
    to `_lowercase`, so those original names must be restored to resolve.
    """
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout."""
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1" , f"""OfflineMock[{url}]"""),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled." , request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send" , raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request" , timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE" , True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def _lowerCAmelCase (*_lowerCAmelCase , **_lowerCAmelCase):
UpperCamelCase_ = str(Path().resolve())
with tempfile.TemporaryDirectory(*_lowerCAmelCase , **_lowerCAmelCase) as tmp_dir:
try:
os.chdir(_lowerCAmelCase)
yield
finally:
os.chdir(_lowerCAmelCase)
@contextmanager
def _lowerCAmelCase ():
    """Assert that the wrapped block increases pyarrow's allocated memory.

    Fixes the previous revision, which bound the baseline snapshot to a
    placeholder name and then compared against the undefined name
    `previous_allocated_memory`.
    """
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _lowerCAmelCase ():
    """Assert that the wrapped block does NOT increase pyarrow's allocated memory.

    Fixes the previous revision, which bound the baseline snapshot to a
    placeholder name and then compared against the undefined name
    `previous_allocated_memory`.
    """
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase):
return deepcopy(_lowerCAmelCase).integers(0 , 1_00 , 10).tolist() == deepcopy(_lowerCAmelCase).integers(0 , 1_00 , 10).tolist()
def _lowerCAmelCase (func):
    """Wrap `func` so that HTTP 500/502 errors mark the test xfail instead of failing.

    Fixes the previous revision, in which the wrapper's parameters all shared
    one placeholder name (a SyntaxError) and the wrapped callable and caught
    error were referenced through undefined names.
    """
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            # Transient server-side failures should not fail the suite.
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper , func)
class _lowercase :
'''simple docstring'''
def __init__( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
UpperCamelCase_ = returncode
UpperCamelCase_ = stdout
UpperCamelCase_ = stderr
async def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase):
while True:
UpperCamelCase_ = await stream.readline()
if line:
callback(_lowerCAmelCase)
else:
break
async def _lowerCAmelCase (cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    """Run `cmd` asynchronously, teeing its stdout/stderr line by line.

    Returns a `_RunOutput` (return code, stdout lines, stderr lines).
    Fixes the previous revision, in which all six parameters shared the name
    `_lowerCAmelCase` (a SyntaxError) and the tee helper's parameters did too.
    NOTE(review): `_read_stream` and `_RunOutput` are referenced by their
    original names; the definitions above were renamed to placeholders.
    """
    if echo:
        print("\nRunning: " , " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label , line , file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda line: tee(line , out , sys.stdout , label="stdout:")),
            _read_stream(p.stderr , lambda line: tee(line , err , sys.stderr , label="stderr:")),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err)
def _lowerCAmelCase (cmd, env=None, stdin=None, timeout=1_80, quiet=False, echo=True):
    """Run `cmd` synchronously via `_stream_subprocess`, raising on failure or
    on a completely silent subprocess.

    Fixes the previous revision, in which all six parameters shared the name
    `_lowerCAmelCase` (a SyntaxError) and the results were bound to
    placeholders while the body read `result`, `cmd_str` and `stderr`.
    NOTE(review): `_stream_subprocess` is referenced by its original name;
    the coroutine above was renamed to a placeholder.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo))

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            f"""The combined stderr from workers follows:\n{stderr}""")

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"""'{cmd_str}' produced no output.""")

    return result
def _lowerCAmelCase ():
UpperCamelCase_ = os.environ.get("PYTEST_XDIST_WORKER" , "gw0")
UpperCamelCase_ = re.sub(r"^gw" , "" , _lowerCAmelCase , 0 , re.M)
return int(_lowerCAmelCase)
def _lowerCAmelCase ():
    """Return a torch.distributed port unique to this pytest-xdist worker
    (base 29500 plus the worker id).

    Fixes the previous revision, where both values were bound to the same
    placeholder name while the body read `port` and `uniq_delta`.
    NOTE(review): `pytest_xdist_worker_id` refers to the helper defined
    above, which was itself renamed to a placeholder.
    """
    port = 2_95_00
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 721 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def _lowerCAmelCase (_lowerCAmelCase):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase):
raise TypeError("Undefined for non-integers")
elif precision < 1:
raise ValueError("Undefined for non-natural numbers")
UpperCamelCase_ = precision
UpperCamelCase_ = ceil(precision / 14)
UpperCamelCase_ = 42_68_80 * Decimal(1_00_05).sqrt()
UpperCamelCase_ = 1
UpperCamelCase_ = 13_59_14_09
UpperCamelCase_ = Decimal(_lowerCAmelCase)
for k in range(1 , _lowerCAmelCase):
UpperCamelCase_ = factorial(6 * k) // (factorial(3 * k) * factorial(_lowerCAmelCase) ** 3)
linear_term += 5_45_14_01_34
exponential_term *= -26_25_37_41_26_40_76_80_00
partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    # NOTE(review): `pi` is undefined here — the function above was renamed to
    # `_lowerCAmelCase` — and `n` no longer matches the placeholder assignment
    # below, so this demo raises NameError if executed.
    UpperCAmelCase : Any =50
    print(F"The first {n} digits of pi is: {pi(n)}")
| 504 | 0 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_lowerCAmelCase : Union[str, Any] = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class __snake_case ( unittest.TestCase ):
    """Runs the library's doctests over whole source/doc directories.

    NOTE(review): in the first method below the parameters are all named `a_`
    (duplicate parameter names are a SyntaxError) and the bodies reference
    names (`files`, `identifier`, `result`, ...) whose assignments were
    rebound to the `lowerCAmelCase__` placeholder — the original identifiers
    must be restored before this can run.
    """

    def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ = None ,a_ = None ,a_ = None ,a_ = True ,):
        """Doctest every file in a directory, with include/exclude filters.

        Expected signature: (directory, identifier=None, ignore_files=None,
        n_identifier=None, only_modules=True) — TODO confirm.
        """
        lowerCAmelCase__ = [file for file in os.listdir(SCREAMING_SNAKE_CASE__ ) if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) )]
        # Keep only files whose name contains `identifier`, if one was given.
        if identifier is not None:
            lowerCAmelCase__ = [file for file in files if identifier in file]
        # `n_identifier` excludes matching files; it may be one string or a list.
        if n_identifier is not None:
            if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
                for n_ in n_identifier:
                    lowerCAmelCase__ = [file for file in files if n_ not in file]
            else:
                lowerCAmelCase__ = [file for file in files if n_identifier not in file]
        lowerCAmelCase__ = ignore_files or []
        ignore_files.append('__init__.py' )
        lowerCAmelCase__ = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing' ,SCREAMING_SNAKE_CASE__ )
            if only_modules:
                # Import the file as a module and run its DocTestSuite.
                lowerCAmelCase__ = file.split('.' )[0]
                try:
                    lowerCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
                    lowerCAmelCase__ = doctest.DocTestSuite(SCREAMING_SNAKE_CASE__ )
                    lowerCAmelCase__ = unittest.TextTestRunner().run(SCREAMING_SNAKE_CASE__ )
                    self.assertIs(len(result.failures ) ,0 )
                except AttributeError:
                    logger.info(f'{module_identifier} is not a module.' )
            else:
                # Treat the file as plain text and doctest it directly.
                lowerCAmelCase__ = doctest.testfile(str('..' / directory / file ) ,optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed ,0 )

    def SCREAMING_SNAKE_CASE_ ( self ):
        """Doctest the `modeling_*` files (CTRL files excluded)."""
        lowerCAmelCase__ = Path('src/transformers' )
        lowerCAmelCase__ = """modeling"""
        lowerCAmelCase__ = [
            """modeling_ctrl.py""",
            """modeling_tf_ctrl.py""",
        ]
        self.analyze_directory(SCREAMING_SNAKE_CASE__ ,identifier=SCREAMING_SNAKE_CASE__ ,ignore_files=SCREAMING_SNAKE_CASE__ )

    def SCREAMING_SNAKE_CASE_ ( self ):
        """Doctest the `tokenization_*` files."""
        lowerCAmelCase__ = Path('src/transformers' )
        lowerCAmelCase__ = """tokenization"""
        self.analyze_directory(SCREAMING_SNAKE_CASE__ ,identifier=SCREAMING_SNAKE_CASE__ )

    def SCREAMING_SNAKE_CASE_ ( self ):
        """Doctest the `configuration_*` files."""
        lowerCAmelCase__ = Path('src/transformers' )
        lowerCAmelCase__ = """configuration"""
        self.analyze_directory(SCREAMING_SNAKE_CASE__ ,identifier=SCREAMING_SNAKE_CASE__ )

    def SCREAMING_SNAKE_CASE_ ( self ):
        """Doctest everything except configuration/modeling/tokenization files."""
        lowerCAmelCase__ = Path('src/transformers' )
        lowerCAmelCase__ = ["""configuration""", """modeling""", """tokenization"""]
        self.analyze_directory(SCREAMING_SNAKE_CASE__ ,n_identifier=SCREAMING_SNAKE_CASE__ )

    def SCREAMING_SNAKE_CASE_ ( self ):
        """Doctest the documentation sources as plain text files."""
        lowerCAmelCase__ = Path('docs/source' )
        lowerCAmelCase__ = ["""favicon.ico"""]
        self.analyze_directory(SCREAMING_SNAKE_CASE__ ,ignore_files=SCREAMING_SNAKE_CASE__ ,only_modules=SCREAMING_SNAKE_CASE__ )
| 193 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Make the repository's `src/` importable so tests run against the working
# tree without reinstalling after switching checkouts.
UpperCAmelCase__ : Union[str, Any] = abspath(join(dirname(dirname(__file__)), 'src'))
# NOTE(review): `git_repo_path` is undefined — the path above was bound to a
# placeholder name — so this insert raises NameError at import time.
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def lowercase_ ( _snake_case ):
    """pytest_addoption hook body: register the CLI options shared across HF
    test suites (e.g. --make-reports); `_snake_case` is the pytest parser."""
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(_snake_case )
def lowercase_ ( _snake_case ):
    """pytest_terminal_summary hook body: emit the HF test reports when
    --make-reports was passed; `_snake_case` is the pytest terminalreporter.

    Fixes the previous revision, which read the undefined name
    `terminalreporter` (the parameter was renamed) and passed the reporter
    object itself as the report `id` instead of the option value.
    """
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = _snake_case.config.getoption("""--make-reports""" )
    if make_reports:
        pytest_terminal_summary_main(_snake_case , id=make_reports )
| 223 | 0 |
def snake_case( first_term , common_diff , num_of_terms ) -> float:
    """Sum of an arithmetic progression: n/2 * (2a + (n-1)d).

    Fixes the previous revision, in which all three parameters were named
    `__magic_name__` (duplicate parameter names are a SyntaxError) while the
    body already used the real names.
    """
    # formula for sum of series
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total
def snake_case( ) -> List[str]:
    """Demo: print the sum of the arithmetic series 1, 2, ..., 10 (= 55).

    NOTE(review): `sum_of_series` is undefined in this module — the function
    above was renamed to `snake_case`, which this very definition now shadows.
    """
    print(sum_of_series(1 , 1 , 10 ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import os
lowerCAmelCase_ = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 1_00, 'D': 5_00, 'M': 10_00}
# The parser below reads the table by its original name; keep the old binding
# for backward compatibility.
SYMBOLS = lowerCAmelCase_


def snake_case( numerals ) -> int:
    """Integer value of a roman-numeral string (subtractive notation handled).

    Fixes the previous revision, where the accumulator and index were both
    bound to the placeholder `lowercase` while the loop read `total_value`,
    `index`, `current_value` and `next_value`, and where the symbol table was
    only bound to `lowerCAmelCase_` while being read as `SYMBOLS`.
    """
    total_value = 0
    index = 0
    # A symbol smaller than its successor is subtracted (e.g. IV == 4).
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def snake_case( num ) -> str:
    """Minimal roman-numeral representation of `num` (expected 1..3999).

    Fixes the previous revision, where every counter was bound to the
    placeholder `lowercase` while the body read `numerals`, `num`,
    `m_count`, `c_count` and `x_count`.
    """
    numerals = ""

    # Thousands: plain repetition of "M".
    m_count = num // 10_00
    numerals += m_count * "M"
    num %= 10_00

    # Hundreds: handle the subtractive forms CM (900) and CD (400) first.
    c_count = num // 1_00
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 1_00

    # Tens: XC (90) and XL (40) are the subtractive forms.
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    # Units: IX (9) and IV (4) are the subtractive forms.
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def snake_case( __magic_name__ = "/p089_roman.txt" ) -> int:
    """Project Euler 89: total characters saved by rewriting every roman
    numeral in the data file in minimal form.

    NOTE(review): the body reads `roman_numerals_filename`, `lines`,
    `numerals` and `savings`, but every assignment targets the placeholder
    `lowercase`, and it calls `parse_roman_numerals` /
    `generate_roman_numerals`, which were both renamed to `snake_case`
    above — this function cannot run as written.
    """
    lowercase : Union[str, Any] = 0
    with open(os.path.dirname(__magic_name__ ) + roman_numerals_filename ) as filea:
        lowercase : List[str] = filea.readlines()
    for line in lines:
        lowercase : Dict = line.strip()
        lowercase : Optional[int] = parse_roman_numerals(__magic_name__ )
        lowercase : List[Any] = generate_roman_numerals(__magic_name__ )
        savings += len(__magic_name__ ) - len(__magic_name__ )
    return savings


if __name__ == "__main__":
    # NOTE(review): `solution` is likewise undefined here (renamed to `snake_case`).
    print(f'''{solution() = }''')
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
# NOTE(review): this block is a machine-renamed copy of a
# VisionTextDualEncoderProcessor test suite and is not runnable as written:
#   * all seven methods share the name `lowerCamelCase_`, so only the last
#     definition survives in the class namespace and unittest discovers no
#     test methods (the original setUp/get_*/test_* names were renamed away);
#   * bodies reference `__UpperCamelCase`, which Python name-mangles inside
#     this class to the undefined `_UpperCamelCase__UpperCamelCase`;
#   * the first method assigns temporary paths to the local
#     `SCREAMING_SNAKE_CASE` instead of the `self.tmpdirname` /
#     `self.vocab_file` / `self.image_processor_file` attributes the other
#     methods read, and later reads undefined locals such as `vocab_tokens`.
# Code kept unchanged below; restoring it requires the original upstream
# identifiers.
@require_tokenizers
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
    """simple docstring"""

    def lowerCamelCase_ ( self : int ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Union[str, Any] = tempfile.mkdtemp()

        # fmt: off
        SCREAMING_SNAKE_CASE : Any = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
        # fmt: on
        SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )

        SCREAMING_SNAKE_CASE : Optional[Any] = {
            """do_resize""": True,
            """size""": {"""height""": 18, """width""": 18},
            """do_normalize""": True,
            """image_mean""": [0.5, 0.5, 0.5],
            """image_std""": [0.5, 0.5, 0.5],
        }
        SCREAMING_SNAKE_CASE : Dict = os.path.join(self.tmpdirname , __UpperCamelCase )
        with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
            json.dump(__UpperCamelCase , __UpperCamelCase )

    def lowerCamelCase_ ( self : List[Any] , **lowerCamelCase_ : List[Any] ):
        '''simple docstring'''
        return BertTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )

    def lowerCamelCase_ ( self : List[Any] , **lowerCamelCase_ : str ):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCamelCase )

    def lowerCamelCase_ ( self : str ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )

    def lowerCamelCase_ ( self : List[str] ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : int = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]

        SCREAMING_SNAKE_CASE : str = [Image.fromarray(np.moveaxis(__UpperCamelCase , 0 , -1 ) ) for x in image_inputs]

        return image_inputs

    def lowerCamelCase_ ( self : Any ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor()

        SCREAMING_SNAKE_CASE : int = VisionTextDualEncoderProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
        processor.save_pretrained(self.tmpdirname )
        SCREAMING_SNAKE_CASE : str = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , __UpperCamelCase )

    def lowerCamelCase_ ( self : int ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Optional[int] = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )

        SCREAMING_SNAKE_CASE : str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        SCREAMING_SNAKE_CASE : Dict = self.get_image_processor(do_normalize=__UpperCamelCase , padding_value=1.0 )

        SCREAMING_SNAKE_CASE : Dict = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__UpperCamelCase , padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __UpperCamelCase )

    def lowerCamelCase_ ( self : str ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor()
        SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()

        SCREAMING_SNAKE_CASE : Any = VisionTextDualEncoderProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )

        SCREAMING_SNAKE_CASE : List[str] = self.prepare_image_inputs()

        SCREAMING_SNAKE_CASE : List[Any] = image_processor(__UpperCamelCase , return_tensors="""np""" )
        SCREAMING_SNAKE_CASE : int = processor(images=__UpperCamelCase , return_tensors="""np""" )

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def lowerCamelCase_ ( self : Optional[int] ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor()
        SCREAMING_SNAKE_CASE : str = self.get_tokenizer()

        SCREAMING_SNAKE_CASE : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )

        SCREAMING_SNAKE_CASE : str = """lower newer"""

        SCREAMING_SNAKE_CASE : Any = processor(text=__UpperCamelCase )

        SCREAMING_SNAKE_CASE : Any = tokenizer(__UpperCamelCase )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def lowerCamelCase_ ( self : Union[str, Any] ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : int = self.get_image_processor()
        SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()

        SCREAMING_SNAKE_CASE : int = VisionTextDualEncoderProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )

        SCREAMING_SNAKE_CASE : Any = """lower newer"""
        SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_image_inputs()

        SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=__UpperCamelCase , images=__UpperCamelCase )

        self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )

        # test if it raises when no input is passed
        with self.assertRaises(__UpperCamelCase ):
            processor()

    def lowerCamelCase_ ( self : List[str] ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : int = self.get_image_processor()
        SCREAMING_SNAKE_CASE : str = self.get_tokenizer()

        SCREAMING_SNAKE_CASE : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )

        SCREAMING_SNAKE_CASE : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        SCREAMING_SNAKE_CASE : Any = processor.batch_decode(__UpperCamelCase )
        SCREAMING_SNAKE_CASE : int = tokenizer.batch_decode(__UpperCamelCase )

        self.assertListEqual(__UpperCamelCase , __UpperCamelCase )

    def lowerCamelCase_ ( self : Optional[int] ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor()
        SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()

        SCREAMING_SNAKE_CASE : Tuple = VisionTextDualEncoderProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )

        SCREAMING_SNAKE_CASE : int = """lower newer"""
        SCREAMING_SNAKE_CASE : int = self.prepare_image_inputs()

        SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=__UpperCamelCase , images=__UpperCamelCase )

        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    # Keep both names bound when sentencepiece is missing: the class below
    # refers to `MBartTokenizer` as its slow counterpart.
    MBartTokenizer = None
    lowercase = None

lowercase = logging.get_logger(__name__)
# An automated rename collapsed the module constants into repeated
# `lowercase` bindings; the tokenizer class below still refers to them by
# their conventional names, so bind both.
logger = lowercase

VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase = VOCAB_FILES_NAMES

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/mbart-large-en-ro''': (
            '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
        ),
        '''facebook/mbart-large-cc25''': (
            '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
        ),
    },
    '''tokenizer_file''': {
        '''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
        '''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
    },
}
lowercase = PRETRAINED_VOCAB_FILES_MAP

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/mbart-large-en-ro''': 1_0_2_4,
    '''facebook/mbart-large-cc25''': 1_0_2_4,
}
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
lowercase = FAIRSEQ_LANGUAGE_CODES
# NOTE(review): this is a machine-renamed copy of the MBart fast tokenizer
# and cannot even be imported as written:
#   * the two `lowercase__` overloads taking `__UpperCamelCase : List[int] ,
#     __UpperCamelCase : Optional[List[int]] = None` declare the same
#     parameter name twice — a SyntaxError;
#   * `@src_lang.setter` is evaluated while the class body executes and
#     `src_lang` is undefined (the property above it was renamed to
#     `lowercase__`) — NameError;
#   * all methods share the name `lowercase__`, so only the last definition
#     would survive;
#   * the seven `SCREAMING_SNAKE_CASE` class attributes overwrite one
#     another (the original vocab_files_names / slow_tokenizer_class /
#     prefix_tokens / suffix_tokens names were collapsed);
#   * module-level names it reads (VOCAB_FILES_NAMES, PRETRAINED_*,
#     FAIRSEQ_LANGUAGE_CODES, logger, MBartTokenizer) are not bound under
#     those names as this file stands.
# Code kept unchanged below.
class __A( UpperCAmelCase ):
    SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE = ['''input_ids''', '''attention_mask''']
    SCREAMING_SNAKE_CASE = MBartTokenizer
    SCREAMING_SNAKE_CASE = []
    SCREAMING_SNAKE_CASE = []

    def __init__( self : Any , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : int=None , __UpperCamelCase : Dict="<s>" , __UpperCamelCase : Tuple="</s>" , __UpperCamelCase : Union[str, Any]="</s>" , __UpperCamelCase : Optional[Any]="<s>" , __UpperCamelCase : Optional[Any]="<unk>" , __UpperCamelCase : Dict="<pad>" , __UpperCamelCase : Dict="<mask>" , __UpperCamelCase : Any=None , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Optional[int]=None , **__UpperCamelCase : str , ):
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCamelCase_ = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
        super().__init__(
            vocab_file=__UpperCamelCase , tokenizer_file=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , src_lang=__UpperCamelCase , tgt_lang=__UpperCamelCase , additional_special_tokens=__UpperCamelCase , **__UpperCamelCase , )

        lowerCamelCase_ = vocab_file
        lowerCamelCase_ = False if not self.vocab_file else True

        lowerCamelCase_ = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )

        self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
        lowerCamelCase_ = {
            lang_code: self.convert_tokens_to_ids(__UpperCamelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        lowerCamelCase_ = src_lang if src_lang is not None else """en_XX"""
        lowerCamelCase_ = self.convert_tokens_to_ids(self._src_lang )
        lowerCamelCase_ = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )

    @property
    def lowercase__ ( self : List[str] ):
        return self._src_lang

    @src_lang.setter
    def lowercase__ ( self : Optional[int] , __UpperCamelCase : str ):
        lowerCamelCase_ = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def lowercase__ ( self : Optional[int] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    def lowercase__ ( self : Dict , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
        lowerCamelCase_ = [self.sep_token_id]
        lowerCamelCase_ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def lowercase__ ( self : int , __UpperCamelCase : Tuple , __UpperCamelCase : str , __UpperCamelCase : Optional[str] , __UpperCamelCase : Optional[str] , **__UpperCamelCase : Dict ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        lowerCamelCase_ = src_lang
        lowerCamelCase_ = self(__UpperCamelCase , add_special_tokens=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )
        lowerCamelCase_ = self.convert_tokens_to_ids(__UpperCamelCase )
        lowerCamelCase_ = tgt_lang_id
        return inputs

    def lowercase__ ( self : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : str = "en_XX" , __UpperCamelCase : Optional[List[str]] = None , __UpperCamelCase : str = "ro_RO" , **__UpperCamelCase : Optional[int] , ):
        lowerCamelCase_ = src_lang
        lowerCamelCase_ = tgt_lang
        return super().prepare_seqaseq_batch(__UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )

    def lowercase__ ( self : int ):
        return self.set_src_lang_special_tokens(self.src_lang )

    def lowercase__ ( self : Dict ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    def lowercase__ ( self : Dict , __UpperCamelCase : Dict ):
        lowerCamelCase_ = self.convert_tokens_to_ids(__UpperCamelCase )
        lowerCamelCase_ = []
        lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
        lowerCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
        lowerCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )

        lowerCamelCase_ = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )

    def lowercase__ ( self : Dict , __UpperCamelCase : str ):
        lowerCamelCase_ = self.convert_tokens_to_ids(__UpperCamelCase )
        lowerCamelCase_ = []
        lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
        lowerCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
        lowerCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )

        lowerCamelCase_ = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )

    def lowercase__ ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )

        if not os.path.isdir(__UpperCamelCase ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' )
            return
        lowerCamelCase_ = os.path.join(
            __UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ):
            copyfile(self.vocab_file , __UpperCamelCase )

        return (out_vocab_file,)
import os
from math import log10


def lowerCAmelCase_ ( UpperCamelCase_ = "base_exp.txt" ) -> int:
    """Project Euler 99: the 1-indexed line whose `base,exponent` pair has
    the greatest value base**exponent.

    Comparing `exponent * log10(base)` avoids computing the huge powers.
    (The original imported the nonexistent `math.logaa` and unpacked both
    fields of each line into a single reused name, so it could not run.)

    Args:
        UpperCamelCase_: name of the data file, resolved next to this module.

    Returns:
        The 1-indexed line number of the largest value (0 for an empty file).
    """
    largest = 0.0
    result = 0
    path = os.path.join(os.path.dirname(__file__ ) , UpperCamelCase_ )
    with open(path ) as data_file:
        for i, line in enumerate(data_file ):
            base, exponent = map(int , line.split("," ) )
            if exponent * log10(base ) > largest:
                largest = exponent * log10(base )
                result = i + 1
    return result


solution = lowerCAmelCase_  # name called by the banner below


if __name__ == "__main__":
    print(solution())
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
_UpperCAmelCase = logging.get_logger(__name__)
# An automated rename collapsed the module constants into repeated
# `_UpperCAmelCase` bindings; the tokenizer class below still refers to them
# by their conventional names, so keep both names bound.
logger = _UpperCAmelCase

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}
_UpperCAmelCase = VOCAB_FILES_NAMES

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
    },
    'merges_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
    },
    'tokenizer_config_file': {
        'facebook/blenderbot_small-90M': (
            'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
        )
    },
}
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/blenderbot_small-90M': 5_1_2,
}
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
# NOTE(review): machine-renamed copy of the BlenderbotSmall fast tokenizer;
# not importable as written:
#   * `__init__` declares the parameter `_SCREAMING_SNAKE_CASE` seven times —
#     a SyntaxError (duplicate argument name);
#   * both `lowercase` methods share one name, so only the last survives;
#   * the first `lowercase` body builds a list into `UpperCamelCase_` but
#     then `return output` reads an undefined name, and `token_ids_a` stands
#     for what were originally two distinct token lists.
# Code kept unchanged below.
class _UpperCamelCase ( lowerCAmelCase_ ):
    _UpperCamelCase : Optional[Any] = VOCAB_FILES_NAMES
    _UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
    _UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCamelCase : str = BlenderbotSmallTokenizer

    def __init__( self: Dict , _SCREAMING_SNAKE_CASE: Optional[int]=None , _SCREAMING_SNAKE_CASE: List[str]=None , _SCREAMING_SNAKE_CASE: Optional[int]="<|endoftext|>" , _SCREAMING_SNAKE_CASE: List[Any]="<|endoftext|>" , _SCREAMING_SNAKE_CASE: List[str]="<|endoftext|>" , _SCREAMING_SNAKE_CASE: Optional[Any]=False , _SCREAMING_SNAKE_CASE: int=True , **_SCREAMING_SNAKE_CASE: Optional[int] , ) -> int:
        """simple docstring"""
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=_SCREAMING_SNAKE_CASE , merges=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE , trim_offsets=_SCREAMING_SNAKE_CASE , ) , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
        UpperCamelCase_ = add_prefix_space

    def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Any=None ) -> List[str]:
        """simple docstring"""
        UpperCamelCase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def lowercase ( self: int , _SCREAMING_SNAKE_CASE: List[int] , _SCREAMING_SNAKE_CASE: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        UpperCamelCase_ = [self.sep_token_id]
        UpperCamelCase_ = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( a_list: list[int] , item: int ) -> bool:
    """Recursive binary search: return True iff *item* occurs in the sorted
    list *a_list*.

    (The original declared both parameters as `snake_case_` — a SyntaxError —
    while the body already used the upstream names `a_list`/`item`, which are
    restored here.)
    """
    if len(a_list ) == 0:
        return False
    midpoint = len(a_list ) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        # Search the left half; the midpoint itself was already tested.
        return lowercase__(a_list[:midpoint] , item )
    else:
        return lowercase__(a_list[midpoint + 1 :] , item )


# Restore the name the interactive __main__ block below calls.
binary_search = lowercase__
# Interactive driver: read a comma-separated sorted sequence and a target,
# then report whether the target occurs.
# NOTE(review): `binary_search` is not bound in this file as written (the
# function above was renamed to `lowercase__`) — confirm/restore the alias.
if __name__ == "__main__":
    _lowercase : str = input('Enter numbers separated by comma:\n').strip()
    _lowercase : int = [int(item.strip()) for item in user_input.split(',')]
    _lowercase : str = int(input('Enter the number to be found in the list:\n').strip())
    _lowercase : Union[str, Any] = '' if binary_search(sequence, target) else 'not '
    print(f"""{target} was {not_str}found in {sequence}""")
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available


# Lazy-import structure: submodule name -> public names it exposes.
# (An automated rename had collapsed `_import_structure` into a reused
# `UpperCamelCase_` binding — clobbering the dict with a bare list, leaving
# `_import_structure` undefined at the `_LazyModule` call, and dropping the
# `sys.modules[__name__]` installation so lazy loading never happened.)
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
UpperCamelCase_ = _import_structure

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class a ( TypedDict ):
    """Result of the BWT: the transformed string plus the index of the
    original string among the sorted rotations (needed to invert).

    (The original subclassed the undefined `__lowerCAmelCase` and had its
    two field annotations replaced by `= 42` placeholders.)
    """

    bwt_string: str
    idx_original_string: int


# Restore the conventional name used in annotations elsewhere in this module.
BWTTransformDict = a
def UpperCamelCase__ ( _lowercase : str ) -> list[str]:
    """Return every rotation of the input string: element i is s[i:] + s[:i].

    (The original tested `isinstance(s, s)` — which always raises — and its
    body read an undefined `s`; both are artifacts of an automated rename.)

    Raises:
        TypeError: if the argument is not a str.
    """
    if not isinstance(_lowercase , str ):
        raise TypeError("The parameter s type must be str." )

    return [_lowercase[i:] + _lowercase[:i] for i in range(len(_lowercase ) )]


# Two later definitions reuse the name `UpperCamelCase__`; bind the name the
# rest of this module calls before it is shadowed.
all_rotations = UpperCamelCase__
def UpperCamelCase__ ( _lowercase : str ) -> BWTTransformDict:
    """Burrows-Wheeler transform of the input string.

    Returns a dict with the BWT string (last column of the sorted rotation
    matrix) and the row index of the original string, which is required to
    invert the transform.

    Raises:
        TypeError: if the input is not a str.
        ValueError: if the input is empty.
    """
    if not isinstance(_lowercase , str ):
        raise TypeError("The parameter s type must be str." )
    if not _lowercase:
        raise ValueError("The parameter s must not be empty." )

    # Rotation generation is inlined (the helper's module-level name was lost
    # to an automated rename): rotation i is s[i:] + s[:i].
    rotations = [_lowercase[i:] + _lowercase[:i] for i in range(len(_lowercase ) )]
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(_lowercase ),
    }
    return response


bwt_transform = UpperCamelCase__  # name called by the __main__ block below
def UpperCamelCase__ ( bwt_string : str , idx_original_string : int ) -> str:
    """Invert the Burrows-Wheeler transform.

    Prepending the BWT column and re-sorting, len(bwt_string) times,
    reconstructs the sorted rotation matrix; the row at
    *idx_original_string* is the original string.

    (The original declared both parameters as `_lowercase` — a SyntaxError —
    so the upstream parameter names are restored.)

    Raises:
        TypeError: if bwt_string is not a str, or the index cannot be cast
            to int.
        ValueError: if bwt_string is empty or the index is out of range.
    """
    if not isinstance(bwt_string , str ):
        raise TypeError("The parameter bwt_string type must be str." )
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty." )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int." )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0." )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)." )

    ordered_rotations = ["" ] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


reverse_bwt = UpperCamelCase__  # name called by the __main__ block below
if __name__ == "__main__":
    # Interactive demo: transform a user-supplied string, then invert it.
    # (The original assigned every value to one reused name and then read the
    # undefined `entry_msg`/`s`/`result`/`original_string`; a dataset
    # separator was also fused onto the final line.)
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg ).strip()
    result = bwt_transform(s )
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"] , result["idx_original_string"] )
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
from __future__ import annotations
SCREAMING_SNAKE_CASE_ = 10
RADIX = SCREAMING_SNAKE_CASE_  # base used by the radix sort below


def UpperCamelCase__ ( _lowercase : list[int] ) -> list[int]:
    """LSD radix sort (base RADIX) of a list of non-negative ints, in place.

    (The original had its bucket index, write cursor, and write-back all
    renamed into unreadable/undefined names; it also used float division,
    which can misplace very large integers.)

    Returns:
        The same list object, sorted ascending ([] is returned unchanged).
    """
    if not _lowercase:  # max() below would raise on an empty list
        return _lowercase
    placement = 1
    max_digit = max(_lowercase )
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX )]
        # split the values between the buckets by the current digit
        for value in _lowercase:
            # integer arithmetic — exact for arbitrarily large ints
            buckets[(value // placement) % RADIX].append(value )
        # put each buckets' contents back into the list
        write_index = 0
        for b in range(RADIX ):
            for value in buckets[b]:
                _lowercase[write_index] = value
                write_index += 1
        # move to next digit
        placement *= RADIX
    return _lowercase
if __name__ == "__main__":
    # Run the module doctests when executed as a script. (A dataset column
    # separator fused onto the last line had broken the syntax.)
    import doctest

    doctest.testmod()
import math
def _snake_case ( __snake_case = 100 ):
_UpperCamelCase = sum(i * i for i in range(1 , n + 1 ) )
_UpperCamelCase = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'{solution() = }')
| 10 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowercase__():
    """Simulate torch's out-of-memory failure for the batch-size-finder
    tests below.

    (The original's `-> Optional[int]` annotation raised NameError at import
    time — `Optional` is not imported in this file — so it is dropped.)
    """
    raise RuntimeError("CUDA out of memory." )


# Name the test cases below actually call (lost in an automated rename).
raise_fake_out_of_memory = lowercase__
class A_ ( nn.Module ):
    """Tiny Linear(3,4) -> BatchNorm1d(4) -> Linear(4,5) network used by the
    memory tests below (which refer to it as ``ModelForTest``).

    The garbled original assigned both Linear layers to one attribute and
    called the nonexistent ``nn.BatchNormad``; both are repaired here.
    """

    def __init__( self ):
        super().__init__()
        self.lineara = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linearb = nn.Linear(4 , 5 )

    def a ( self , A_ ):
        """Forward pass for a float tensor of shape (batch, 3)."""
        return self.linearb(self.batchnorm(self.lineara(A_ ) ) )

    # nn.Module dispatches __call__ to `forward`; the original method name
    # was renamed to `a`, so bind both.
    forward = a


ModelForTest = A_  # name the test case below instantiates
# NOTE(review): machine-renamed copy of accelerate's batch-size-finder test
# suite; not importable as written:
#   * `nonlocal batch_sizes` has no binding in the enclosing function (the
#     list was assigned to `_UpperCamelCase`) — a SyntaxError;
#   * this class reuses the name `A_`, shadowing the model class above, and
#     references `ModelForTest` and `raise_fake_out_of_memory`, which are
#     not bound under those names as this file stands;
#   * all seven methods are named `a`, so only the last would survive and
#     unittest would discover no tests;
#   * the mock functions' parameters were renamed to `A_` while their bodies
#     still read `batch_size` / `arga`.
# Code kept unchanged below.
class A_ ( unittest.TestCase ):
    '''simple docstring'''

    def a ( self ):
        _UpperCamelCase = []

        @find_executable_batch_size(starting_batch_size=1_28 )
        def mock_training_loop_function(A_ ):
            nonlocal batch_sizes
            batch_sizes.append(A_ )
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(A_ , [1_28, 64, 32, 16, 8] )

    def a ( self ):
        _UpperCamelCase = []

        @find_executable_batch_size(starting_batch_size=1_28 )
        def mock_training_loop_function(A_ , A_ ):
            nonlocal batch_sizes
            batch_sizes.append(A_ )
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga

        _UpperCamelCase , _UpperCamelCase = mock_training_loop_function("hello" )
        self.assertListEqual(A_ , [1_28, 64, 32, 16, 8] )
        self.assertListEqual([bs, arga] , [8, "hello"] )

    def a ( self ):
        @find_executable_batch_size(starting_batch_size=0 )
        def mock_training_loop_function(A_ ):
            pass

        with self.assertRaises(A_ ) as cm:
            mock_training_loop_function()
            self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )

    def a ( self ):
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(A_ ):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(A_ ) as cm:
            mock_training_loop_function()
            self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )

    def a ( self ):
        @find_executable_batch_size(starting_batch_size=1_28 )
        def mock_training_loop_function(A_ , A_ , A_ ):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(A_ ) as cm:
            mock_training_loop_function(1_28 , "hello" , "world" )
            self.assertIn("Batch size was passed into `f`" , cm.exception.args[0] )
            self.assertIn("`f(arg1='hello', arg2='world')" , cm.exception.args[0] )

    def a ( self ):
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(A_ ):
            raise ValueError("Oops, we had an error!" )

        with self.assertRaises(A_ ) as cm:
            mock_training_loop_function()
            self.assertIn("Oops, we had an error!" , cm.exception.args[0] )

    @require_cuda
    def a ( self ):
        _UpperCamelCase = torch.cuda.memory_allocated()
        _UpperCamelCase = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , A_ )
        _UpperCamelCase = release_memory(A_ )
        self.assertEqual(torch.cuda.memory_allocated() , A_ )
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def UpperCamelCase( ):
    """Entry point for the `transformers-cli` tool: build the argument
    parser, register every subcommand on its subparsers, parse argv, and run
    the selected service.

    (The garbled original assigned the parser and args to one reused name and
    passed the undefined `UpperCAmelCase_` to every `register_subcommand`.)
    """
    parser = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
    commands_parser = parser.add_subparsers(help='transformers-cli command helpers' )

    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )

    # Let's go
    args = parser.parse_args()

    if not hasattr(args , 'func' ):
        parser.print_help()
        exit(1 )

    # Run
    service = args.func(args )
    service.run()


main = UpperCamelCase  # conventional entry-point name
if __name__ == "__main__":
    # `main` was renamed to `UpperCamelCase` by an automated pass; call the
    # function that actually exists in this module. (Also strips the dataset
    # separator fused onto the last line.)
    UpperCamelCase()
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def UpperCamelCase( input_a , input_b ):
    """Euclidean distance between two equal-length numeric vectors.

    (The original declared both parameters with the same name — a
    SyntaxError — so distinct names are restored.)
    """
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )


# Two later definitions reuse `UpperCamelCase`; bind the name the similarity
# search below calls before it is shadowed.
euclidean = UpperCamelCase
def UpperCamelCase( dataset , value_array ):
    """For every query vector in *value_array*, find the nearest vector in
    *dataset* by Euclidean distance.

    (The original declared both parameters with one name — a SyntaxError —
    and read undefined locals; distinct names are restored.)

    Args:
        dataset: 2-D np.ndarray of candidate vectors.
        value_array: 2-D np.ndarray of query vectors with matching width
            and dtype.

    Returns:
        A list of [nearest_vector_as_list, distance] pairs, one per query.

    Raises:
        ValueError: on dimension or shape mismatch.
        TypeError: on 1-D shape mismatch or dtype mismatch.
    """
    if dataset.ndim != value_array.ndim:
        msg = (
            'Wrong input data\'s dimensions... '
            f'dataset : {dataset.ndim}, value_array : {value_array.ndim}'
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                'Wrong input data\'s shape... '
                f'dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('Wrong shape' )
    if dataset.dtype != value_array.dtype:
        msg = (
            'Input data have different datatype... '
            f'dataset : {dataset.dtype}, value_array : {value_array.dtype}'
        )
        raise TypeError(msg )

    answer = []
    for value in value_array:
        # Start from the first candidate, then keep the closer of each pair.
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer


similarity_search = UpperCamelCase  # bound before the next redefinition
def UpperCamelCase( input_a , input_b ):
    """Cosine similarity of two 1-D vectors: dot product over the product of
    their norms.

    (The original declared both parameters with one name — a SyntaxError.)
    """
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))


cosine_similarity = UpperCamelCase  # conventional name for this helper
if __name__ == "__main__":
    # Run the module doctests when executed as a script. (A dataset column
    # separator fused onto the last line had broken the syntax.)
    import doctest

    doctest.testmod()
'''simple docstring'''
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
UpperCAmelCase_ = logging.get_logger(__name__)
# The classes below were written against the conventional names `logger` and
# `STOPPING_CRITERIA_INPUTS_DOCSTRING`, which an automated rename collapsed
# into repeated `UpperCAmelCase_` bindings; keep both names bound. (The
# original `: List[str]` annotation was also dropped — `List` is not
# imported here, and module-level annotations are evaluated, so it raised
# NameError at import time.)
logger = UpperCAmelCase_

UpperCAmelCase_ = R'''
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

'''
STOPPING_CRITERIA_INPUTS_DOCSTRING = UpperCAmelCase_
class lowerCAmelCase ( __lowerCAmelCase):
@add_start_docstrings(__SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> bool:
'''simple docstring'''
raise NotImplementedError('''StoppingCriteria needs to be subclassed''' )
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> Optional[Any]:
'''simple docstring'''
__snake_case = max_length
__snake_case = max_position_embeddings
@add_start_docstrings(__SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> bool:
'''simple docstring'''
__snake_case = input_ids.shape[-1]
__snake_case = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
'''This is a friendly reminder - the current text generation call will exceed the model\'s predefined '''
F'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
'''exceptions, performance degradation, or nothing at all.''' )
return is_done
class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: stop once `max_new_tokens` tokens were generated after
    `start_length`. Use `MaxLengthCriteria(max_length=start_length + max_new_tokens)`.

    NOTE(review): reconstructed from obfuscated source — duplicate parameter
    names were a SyntaxError, and the `warnings.warn` category argument was a
    placeholder; `FutureWarning` matches the deprecation message — confirm
    against upstream.
    """

    def __init__(self, start_length, max_new_tokens):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    def __call__(self, input_ids, scores, **kwargs) -> bool:
        # Only the precomputed total length matters at call time.
        return input_ids.shape[-1] >= self.max_length
class MaxTimeCriteria(StoppingCriteria):
    """Stop generation after `max_time` seconds have elapsed.

    Args:
        max_time: time budget in seconds.
        initial_timestamp: optional start of the measurement; defaults to the
            construction time of this criterion.

    NOTE(review): reconstructed from obfuscated source — both methods repeated
    one placeholder parameter name (a SyntaxError).
    """

    def __init__(self, max_time, initial_timestamp=None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    def __call__(self, input_ids, scores, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time
class StoppingCriteriaList(list):
    """A list of stopping criteria; generation stops as soon as any member
    returns True.

    NOTE(review): reconstructed from obfuscated source. The base is `list` —
    the original iterates `self` and is appended to after `deepcopy`, which
    only a list subclass supports. The property name `max_length` is grounded
    in the `stopping_criteria.max_length` access in the validation helper
    below; the `isinstance` targets were placeholders and are assumed to be
    the two length-bearing criteria — confirm against upstream.
    """

    def __call__(self, input_ids, scores, **kwargs) -> bool:
        # Short-circuits on the first satisfied criterion.
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self):
        """Return the `max_length` of the first length-based criterion, or None."""
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None
def _UpperCamelCase (_lowerCamelCase : StoppingCriteriaList , _lowerCamelCase : int )-> StoppingCriteriaList:
'''simple docstring'''
__snake_case = stopping_criteria.max_length
__snake_case = deepcopy(_lowerCamelCase )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn('''You set different `max_length` for stopping criteria and `max_length` parameter''' , _lowerCamelCase )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCamelCase ) )
return new_stopping_criteria
| 24 |
'''simple docstring'''
from collections import deque
def tarjan(g):
    """Return the strongly connected components of the directed graph `g`
    (adjacency list) using Tarjan's algorithm; components come out in reverse
    topological order.

    NOTE(review): reconstructed from obfuscated source — the inner helper
    repeated one placeholder for all three parameters (a SyntaxError) and all
    locals were clobbered onto one name; the public name `tarjan` is grounded
    in the call in the `__main__` block below.
    """
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        # `index` is the running DFS counter; the updated counter is returned.
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            # v is the root of an SCC: pop the whole component off the stack.
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components
def create_graph(n_vertices, edges):
    """Build an adjacency list for `n_vertices` nodes from (u, v) edge pairs.

    NOTE(review): reconstructed from obfuscated source — the def repeated one
    placeholder for both parameters (a SyntaxError); the public name is
    grounded in the call in the `__main__` block below.
    """
    g = [[] for _ in range(n_vertices)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
    # Test
    # NOTE(review): variable names reconstructed from their use sites — the
    # obfuscated source assigned every value to one clobbered placeholder,
    # leaving `source`/`target`/`n_vertices`/`edges`/`g` undefined.
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 24 | 1 |
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    """Row-wise softmax over the last axis, shifted by the row maximum for
    numerical stability.

    NOTE(review): reconstructed from obfuscated source — the input array was
    passed as `keepdims` and both locals were clobbered onto one name; the
    public name `softmax` is grounded in its call in `postprocess` below.
    """
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class __lowerCAmelCase(SCREAMING_SNAKE_CASE):
    """Pair-classification pipeline: scores a (text, optional second_text)
    pair and returns the best label, its probability, and the raw logits.

    NOTE(review): reconstructed from obfuscated source — all four hook methods
    were collapsed onto one name and `preprocess` repeated one placeholder
    parameter (a SyntaxError). The canonical `transformers.Pipeline` hook
    names are restored; the base-class placeholder is presumably
    `transformers.Pipeline` — confirm against upstream.
    """

    def _sanitize_parameters(self, **kwargs):
        # Route the optional `second_text` kwarg to preprocess; no forward or
        # postprocess kwargs are supported.
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        """Tokenize the pair into framework tensors."""
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        """Convert logits into {"label", "score", "logits"}."""
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        # NOTE(review): obfuscated attribute `idalabel` restored to the
        # standard `id2label` config mapping — confirm.
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 713 |
import requests
lowercase_ = 'YOUR API KEY'
def get_gifs(query: str, api_key: str = lowercase_) -> list:
    """Query the Giphy search API and return the list of GIF URLs.

    Performs a live network request; requires a valid API key.

    NOTE(review): reconstructed from obfuscated source — the def repeated one
    placeholder for both parameters (a SyntaxError) and the default referenced
    an undefined name (the module constant is `lowercase_`); the public name
    `get_gifs` is grounded in the `__main__` call below.
    """
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
    # Print one GIF URL per line for the sample query.
    # NOTE(review): `get_gifs` is the search function defined above (the
    # obfuscated source bound it to a placeholder name) — confirm the binding.
    print('\n'.join(get_gifs('space ship')))
| 380 | 0 |
def valid_connection(graph, next_ver, curr_ind, path) -> bool:
    """Return True if `next_ver` can extend the partial Hamiltonian `path` at
    position `curr_ind`: it must be adjacent to the previous vertex and not
    already used.

    NOTE(review): reconstructed from obfuscated source — the def repeated one
    placeholder for all four parameters (a SyntaxError); names come from the
    body, the order is the conventional upstream one — confirm.
    """
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)
def util_hamilton_cycle(graph, path, curr_ind) -> bool:
    """Recursive backtracking step: try every vertex as the `curr_ind`-th stop
    of the cycle, mutating `path` in place; return True when `path` closes
    into a Hamiltonian cycle.

    NOTE(review): reconstructed from obfuscated source — the def repeated one
    placeholder for all three parameters (a SyntaxError), and the recursive /
    helper call names were left dangling; they are bound to the sibling
    definitions in this file.
    """
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def hamilton_cycle(graph, start_index=0) -> list[int]:
    """Return a Hamiltonian cycle through `graph` starting and ending at
    `start_index`, or an empty list when none exists.

    NOTE(review): reconstructed from obfuscated source — the def repeated one
    placeholder for both parameters (a SyntaxError) and both locals were
    clobbered onto one name.
    """
    # path holds n + 1 entries: the cycle revisits the start vertex at the end.
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
| 302 |
import os
def solution(filename: str = "matrix.txt") -> int:
    """Return the minimal path sum from top-left to bottom-right of the grid
    stored in `filename`, moving only right and down (Project Euler 81).

    NOTE(review): reconstructed from obfuscated source — the cell parser
    called `int` on the whole filename instead of each cell, and every local
    was clobbered onto one name. The file is resolved relative to this script
    (presumably the upstream behaviour — confirm). The public name `solution`
    is grounded in the `__main__` call below.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for _ in range(n)] for _ in range(n)]
    dp[0][0] = grid[0][0]

    # First row/column can only be reached by moving straight along them.
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    # Each inner cell takes the cheaper of its top/left predecessors.
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]
if __name__ == "__main__":
    # NOTE(review): `solution` is the grid-path function defined above (the
    # obfuscated source bound it to a placeholder name) — confirm the binding.
    print(F"""{solution() = }""")
| 302 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the ConvNext model family.
# NOTE(review): reconstructed from obfuscated source — every assignment was
# clobbered onto one placeholder, so `_import_structure` (consumed by
# `_LazyModule` at the bottom) was never actually built. The sub-module keys
# are grounded in the TYPE_CHECKING imports below.
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that imports sub-modules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 707 |
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __magic_name__(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast tests for StableDiffusionControlNetImg2ImgPipeline with a single
    ControlNet.

    NOTE(review): reconstructed from obfuscated source — every class attribute
    and method shared one placeholder name and `get_dummy_inputs` repeated one
    placeholder parameter (a SyntaxError). Mixin bases, attribute names and
    hook names follow the diffusers pipeline-test conventions — confirm
    against upstream.
    """

    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny randomly-initialized components for fast pipeline tests."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        # NOTE(review): clip_sample/set_alpha_to_one placeholders restored to
        # False, the standard fast-test DDIM configuration — confirm.
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic prompt/image/control-image inputs on `device`."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        # NOTE(review): obfuscated `np.uinta` restored to np.uint8.
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class __magic_name__(PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase):
    """Fast tests for StableDiffusionControlNetImg2ImgPipeline driven by a
    MultiControlNetModel (two ControlNets).

    NOTE(review): reconstructed from obfuscated source — class attributes,
    method names, both ControlNet locals and all per-trial outputs shared one
    placeholder name each, and `get_dummy_inputs` repeated one placeholder
    parameter (a SyntaxError). Names follow the diffusers pipeline-test
    conventions — confirm against upstream.
    """

    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        """Build tiny components with two ControlNets wrapped in a MultiControlNetModel."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            # NOTE(review): `Convad` restored to Conv2d; the first isinstance
            # argument was an obfuscated placeholder.
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic inputs with one control image per ControlNet."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        """Different control-guidance windows must change the output."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        scale = 10.0
        steps = 4

        # NOTE(review): the key the scale is written to was obfuscated away;
        # `controlnet_conditioning_scale` is the upstream pipeline kwarg — confirm.
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class __magic_name__(unittest.TestCase):
    """Slow GPU integration test for the canny ControlNet img2img checkpoint.

    NOTE(review): reconstructed from obfuscated source — both methods shared
    one placeholder name (the first calls `super().tearDown()`, so it is the
    unittest `tearDown` hook) and all locals were clobbered onto one name.
    """

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        # The canny-edge rendering conditions the ControlNet; the photo is the
        # img2img init image.
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        assert np.abs(expected_image - image).max() < 9e-2
| 309 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a, b):
    """Multiply two 2x2 matrices directly (Strassen recursion base case).

    NOTE(review): reconstructed from obfuscated source — the def repeated one
    placeholder for both parameters (a SyntaxError); `a`/`b` are the names the
    body already uses and the public name comes from its call site in
    `actual_strassen`.
    """
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a, matrix_b):
    """Element-wise sum of two equally-shaped matrices.

    NOTE(review): signature reconstructed — the obfuscated def repeated one
    placeholder parameter (a SyntaxError); names come from the body and the
    call sites in `actual_strassen`.
    """
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a, matrix_b):
    """Element-wise difference of two equally-shaped matrices.

    NOTE(review): signature reconstructed — the obfuscated def repeated one
    placeholder parameter (a SyntaxError); names come from the body and the
    call sites in `actual_strassen`.
    """
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a):
    """Split an even-sized square matrix into four quadrants, returned as
    (top_left, top_right, bot_left, bot_right).

    Raises:
        Exception: when either dimension is odd.

    NOTE(review): reconstructed from obfuscated source — all quadrant locals
    and the range bounds were clobbered onto placeholders; `a` is the name the
    body already uses, and the public name comes from its call site in
    `actual_strassen`.
    """
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix):
    """Return (rows, cols) of `matrix`.

    NOTE(review): the obfuscated body referenced `matrix` while the parameter
    was a placeholder; the public name comes from its call sites in
    `actual_strassen`/`strassen`.
    """
    return len(matrix), len(matrix[0])
def print_matrix(matrix):
    """Print the matrix one row per line (debug helper).

    NOTE(review): the obfuscated body stringified the whole parameter once per
    row instead of each row (`str(line)` restored, grounded in the loop
    variable), and referenced `matrix` while the parameter was a placeholder.
    """
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a, matrix_b):
    """Recursively multiply two power-of-two square matrices with Strassen's
    seven-product scheme.

    NOTE(review): reconstructed from obfuscated source — the def repeated one
    placeholder parameter (a SyntaxError) and every quadrant/product local was
    clobbered onto one name; the helper call names (`matrix_dimensions`,
    `split_matrix`, `matrix_addition`, `matrix_subtraction`,
    `default_matrix_multiplication`) were already present in the body and are
    bound to the sibling definitions in this file.
    """
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    # Strassen's seven recursive products.
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(top_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1, matrix2):
    """Multiply two matrices via Strassen's algorithm, zero-padding both to
    the next power-of-two square and trimming the padding afterwards.

    Raises:
        Exception: when the inner dimensions do not match.

    NOTE(review): reconstructed from obfuscated source — the def repeated one
    placeholder parameter (a SyntaxError), the error f-string printed the same
    matrix twice, `math.loga` stood for `math.log2`, and every local was
    clobbered onto one name. The early-return comparison and the public name
    `strassen` (grounded in the `__main__` call below) were restored from
    context — confirm the exact comparison against upstream.
    """
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension2[1] and dimension2[0] == dimension1[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    # NOTE(review): the obfuscated source assigned both operand matrices to
    # one clobbered name and then multiplied that name by itself; the two
    # distinct operands are restored here.
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
from math import factorial
def solution(n: int = 20) -> int:
    """Return the central binomial coefficient C(2n, n): the number of
    monotonic lattice paths through an n x n grid (Project Euler 15).

    NOTE(review): reconstructed from obfuscated source — the body referenced
    `n`/`k` while the parameter was a placeholder; the public name `solution`
    is grounded in the calls in the `__main__` block below.
    """
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        # No argument: compute the default 20x20 grid answer.
        print(solution(20))
    else:
        try:
            # NOTE(review): the obfuscated source bound the parsed argument to
            # a clobbered placeholder and then read an undefined `n`.
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('Invalid entry - please enter a number.')
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
SCREAMING_SNAKE_CASE__ : List[str] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def _a(subparsers=None):
    """Build the argparse parser for `accelerate tpu-config`.

    When `subparsers` is given, the command is attached to it and its default
    `func` is wired to the launcher; otherwise a standalone parser is built.

    NOTE(review): reconstructed from obfuscated source — the parameter was a
    placeholder shadowing `subparsers`, and several argparse keyword values
    (`type=`, `default=`, `set_defaults(func=...)`) were undefined
    placeholders; `type=str` / `default=None` and the launcher binding follow
    the accelerate CLI conventions — confirm against upstream.
    """
    if subparsers is not None:
        parser = subparsers.add_parser('tpu-config', description=_description)
    else:
        parser = argparse.ArgumentParser('Accelerate tpu-config command', description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        'Config Arguments', 'Arguments that can be configured through `accelerate config`.')
    config_args.add_argument(
        '--config_file', type=str, default=None, help='Path to the config file to use for accelerate.', )
    config_args.add_argument(
        '--tpu_name', default=None, help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.', )
    config_args.add_argument(
        '--tpu_zone', default=None, help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.', )
    pod_args = parser.add_argument_group('TPU Arguments', 'Arguments for options ran inside the TPU.')
    pod_args.add_argument(
        '--use_alpha', action='store_true', help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.', )
    pod_args.add_argument(
        '--command_file', default=None, help='The path to the file containing the commands to run on the pod on startup.', )
    pod_args.add_argument(
        '--command', action='append', nargs='+', help='A command to run on the pod. Can be passed multiple times.', )
    pod_args.add_argument(
        '--install_accelerate', action='store_true', help='Whether to install accelerate on the pod. Defaults to False.', )
    pod_args.add_argument(
        '--accelerate_version', default='latest', help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.', )
    pod_args.add_argument(
        '--debug', action='store_true', help='If set, will print the command that would be run instead of running it.')

    if subparsers is not None:
        # NOTE(review): the launcher is the second function in this file
        # (upstream name `tpu_command_launcher`) — confirm the binding.
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    """Run a command (or command file) on a TPU pod via `gcloud ... tpus tpu-vm ssh`.

    Args:
        args: Parsed argparse namespace from the `tpu-config` parser above
            (config_file, tpu_name, tpu_zone, command, command_file,
            install_accelerate, accelerate_version, use_alpha, debug).
    """
    defaults = None

    # Get the default from the config file if it exists.
    # NOTE(review): `default_config_file`, `load_config_from_file`, `parse`,
    # `Version` and `subprocess` are expected to be module-level imports above
    # this chunk — confirm against the file header.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone

    # Resolve the accelerate version spec to an actual pip install target.
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    """CLI entry point: build the tpu-config parser, parse args, and launch."""
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
| 717 | import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class snake_case ( ProcessorMixin ):
    """
    An OWL-ViT processor that wraps an OwlViT image processor and a CLIP
    tokenizer into a single processor.

    Args:
        image_processor: An `OwlViTImageProcessor` instance (required).
        tokenizer: A `CLIPTokenizer`/`CLIPTokenizerFast` instance (required).
    """

    # Names ProcessorMixin uses to wire up the two sub-components.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # `feature_extractor` is the deprecated alias of `image_processor`.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        """
        Prepare text queries and/or (query) images for the model.

        Returns a `BatchEncoding` with `input_ids`/`attention_mask` for text,
        `query_pixel_values` for query images, and `pixel_values` for images.
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                'You have to specify at least one text or query image or image. All three cannot be none.' )

        if text is not None:
            # A single string or a flat list of strings is one batch sample.
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [' '] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError('Input text should be a string, a list of strings or a nested list of strings')

            # Concatenate per-sample encodings along the batch axis in the
            # requested tensor framework.
            if return_tensors == "np":
                input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding['input_ids'] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding['attention_mask'] for encoding in encodings], axis=0)
            else:
                raise ValueError('Target return tensor type could not be returned')

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        """Forward to the image processor's `post_process`."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forward to the image processor's `post_process_object_detection`."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forward to the image processor's `post_process_image_guided_detection`."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        """Deprecated alias of `image_processor_class`."""
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias of `image_processor`."""
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
| 636 | 0 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class a ( unittest.TestCase ):
    """Smoke tests: launch the accelerate test scripts on CPU via debug_launcher."""

    def test_cpu(self):
        """The base test script must run to completion on CPU."""
        debug_launcher(test_script.main)

    def test_ops(self):
        """The ops test script must run to completion on CPU."""
        debug_launcher(test_ops.main)
| 202 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
# Fixture payload written into (and expected back from) the cached files below.
FILE_CONTENT = """\
Text data.
Second line of data."""

# Base filename used by the fixtures.
FILE_PATH = "file"
@pytest.fixture(scope='''session''' )
def zstd_path(tmp_path_factory):
    """Session fixture: write FILE_CONTENT into a zstd-compressed file, return its path."""
    path = tmp_path_factory.mktemp('''data''') / (FILE_PATH + '''.zstd''')
    data = bytes(FILE_CONTENT, '''utf-8''')
    with zstd.open(path, '''wb''') as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    """Fixture: write FILE_CONTENT under the mock fsspec filesystem, return FILE_PATH."""
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), '''w''') as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize('''compression_format''', ['''gzip''', '''xz''', '''zstd'''])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    """cached_path with extract_compressed_file=True must yield the original text."""
    input_paths = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / '''cache'''
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''', [True, False])
@pytest.mark.parametrize('''default_cache_dir''', [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    """The extraction directory must honor default config vs custom overrides."""
    custom_cache_dir = '''custom_cache'''
    custom_extracted_dir = '''custom_extracted_dir'''
    custom_extracted_path = tmp_path / '''custom_extracted_path'''
    if default_extracted:
        expected = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
    else:
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''', custom_extracted_dir)
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''', str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    """cached_path must return local files unchanged, for absolute and relative paths."""
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file
def test_cached_path_missing_local(tmp_path):
    """cached_path must raise FileNotFoundError for missing local files."""
    # absolute path
    missing_file = str(tmp_path.resolve() / '''__missing_file__.txt''')
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = '''./__missing_file__.txt'''
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    """get_from_cache must resolve fsspec URLs and return the cached content."""
    output_path = get_from_cache(F"""tmp://{tmpfs_file}""")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''', True)
def test_cached_path_offline():
    """cached_path must refuse remote URLs in offline mode."""
    with pytest.raises(OfflineModeIsEnabled):
        cached_path('''https://huggingface.co''')
@patch('''datasets.config.HF_DATASETS_OFFLINE''', True)
def test_http_offline(tmp_path_factory):
    """http_get/http_head must refuse to hit the network in offline mode."""
    filename = tmp_path_factory.mktemp('''data''') / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled):
        http_get('''https://huggingface.co''', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head('''https://huggingface.co''')
@patch('''datasets.config.HF_DATASETS_OFFLINE''', True)
def test_ftp_offline(tmp_path_factory):
    """ftp_get/ftp_head must refuse to hit the network in offline mode."""
    filename = tmp_path_factory.mktemp('''data''') / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get('''ftp://huggingface.co''', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head('''ftp://huggingface.co''')
@patch('''datasets.config.HF_DATASETS_OFFLINE''', True)
def test_fsspec_offline(tmp_path_factory):
    """fsspec_get/fsspec_head must refuse remote filesystems in offline mode."""
    filename = tmp_path_factory.mktemp('''data''') / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get('''s3://huggingface.co''', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head('''s3://huggingface.co''')
| 202 | 1 |
'''simple docstring'''
import math
import sys
def read_file_binary(snake_case_: str) -> str:
    """Read a file in binary mode and return its contents as a string of bits.

    Exits the process (after printing a message) if the file cannot be opened.
    """
    result = ''
    try:
        with open(snake_case_, 'rb') as binary_file:
            data = binary_file.read()
        for dat in data:
            # Each byte becomes its 8-character, zero-padded binary form.
            curr_byte = f"""{dat:08b}"""
            result += curr_byte
        return result
    except OSError:
        print('File not accessible')
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompress a bit string produced by the LZW compressor and return the result.

    The lexicon starts with the two one-bit codes and is rebuilt (keys
    zero-prefixed) every time the code width grows by one bit.
    """
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + '0'

        # When index hits a power of two the code width grows: re-key the
        # lexicon with a leading zero so old codes stay addressable.
        if math.loga(index).is_integer() if False else math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex['0' + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + '1'
        index += 1
        curr_string = ''
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the bit string `to_write` to `file_path` as packed bytes.

    The last byte is padded with a '1' followed by zeros (or a full
    '10000000' byte is appended) so the end of data is recoverable.
    Exits the process (after printing a message) if the file cannot be opened.
    """
    byte_length = 8
    try:
        with open(file_path, 'wb') as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('10000000')
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            # Write every chunk, including the padded final byte.
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder='big'))
    except OSError:
        print('File not accessible')
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """Strip the size prefix a compressed file carries and return the payload.

    The prefix is `counter` zeros followed by a (`counter` + 1)-bit field.
    """
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    # Drop the run of zeros, then the length field that follows it.
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """Read the compressed source file, LZW-decompress it, and write the result."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
    # CLI: python <script> <source> <destination>
    # NOTE(review): expects a module-level `compress` — confirm the def above
    # actually uses that name.
    compress(sys.argv[1], sys.argv[2])
| 720 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
# Type variables for the skip list's key and value types.
KT = TypeVar('KT')
VT = TypeVar('VT')
class Node(Generic[KT, VT]):
    """A skip-list node holding a key/value pair and per-level forward links."""

    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        # forward[i] is the next node at level i.
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"""Node({self.key}: {self.value})"""

    @property
    def level(self) -> int:
        """Number of levels this node participates in."""
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    """A probabilistic skip list mapping keys to values.

    Args:
        p: Probability of promoting a node one level up.
        max_level: Hard cap on node levels.
    """

    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        """ASCII rendering of the list, one row per node plus its links."""
        items = list(self)

        if len(items) == 0:
            return f"""SkipList(level={self.level})"""

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"""[{node.key}]""".ljust(label_size, '-') + '* ' * len(forwards))
        lines.append(' ' * label_size + '| ' * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"""[{node.key}]""".ljust(label_size, '-')
                + ' '.join(str(n.key) if n.key == node.key else '|' for n in forwards))
            lines.append(' ' * label_size + '| ' * len(forwards))
            forwards = node.forward

        lines.append('None'.ljust(label_size) + '* ' * len(forwards))
        return f"""SkipList(level={self.level})\n""" + "\n".join(lines)

    def __iter__(self):
        """Yield keys in ascending order by walking level 0."""
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a geometric level in [1, max_level] with promotion probability p."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key):
        """Return (node-with-key or None, per-level predecessors of the key)."""
        update_vector = []
        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        """Remove `key` from the list if present, relinking every level."""
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        """Insert `key` with `value`, overriding the value if the key exists."""
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: VT):
        """Return the value stored under `key`, or None if the key is absent."""
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value

        return None
def test_insert():
    """All inserted key/value pairs must be reachable at level 0."""
    skip_list = SkipList()
    skip_list.insert('Key1', 3)
    skip_list.insert('Key2', 12)
    skip_list.insert('Key3', 41)
    skip_list.insert('Key4', -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def test_insert_overrides_existing_value():
    """Re-inserting an existing key must override its value, not duplicate it."""
    skip_list = SkipList()
    skip_list.insert('Key1', 10)
    skip_list.insert('Key1', 12)

    skip_list.insert('Key5', 7)
    skip_list.insert('Key7', 10)
    skip_list.insert('Key10', 5)

    skip_list.insert('Key7', 7)
    skip_list.insert('Key5', 5)
    skip_list.insert('Key10', 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none():
    """find() on an empty list must return None."""
    skip_list = SkipList()
    assert skip_list.find('Some key') is None
def test_search():
    """find() must return the latest value for present keys and None otherwise."""
    skip_list = SkipList()

    skip_list.insert('Key2', 20)
    assert skip_list.find('Key2') == 20

    skip_list.insert('Some Key', 10)
    skip_list.insert('Key2', 8)
    skip_list.insert('V', 13)

    assert skip_list.find('Y') is None
    assert skip_list.find('Key2') == 8
    assert skip_list.find('Some Key') == 10
    assert skip_list.find('V') == 13
def test_deleting_item_from_empty_list_do_nothing():
    """delete() on an empty list must be a no-op."""
    skip_list = SkipList()
    skip_list.delete('Some key')

    assert len(skip_list.head.forward) == 0
def test_deleted_items_are_not_founded_by_find_method():
    """Deleted keys must no longer be returned by find()."""
    skip_list = SkipList()

    skip_list.insert('Key1', 12)
    skip_list.insert('V', 13)
    skip_list.insert('X', 14)
    skip_list.insert('Key2', 15)

    skip_list.delete('V')
    skip_list.delete('Key2')

    assert skip_list.find('V') is None
    assert skip_list.find('Key2') is None
def test_delete_removes_only_given_key():
    """Each delete() must remove exactly the requested key."""
    skip_list = SkipList()

    skip_list.insert('Key1', 12)
    skip_list.insert('V', 13)
    skip_list.insert('X', 14)
    skip_list.insert('Key2', 15)

    skip_list.delete('V')
    assert skip_list.find('V') is None
    assert skip_list.find('X') == 14
    assert skip_list.find('Key1') == 12
    assert skip_list.find('Key2') == 15

    skip_list.delete('X')
    assert skip_list.find('V') is None
    assert skip_list.find('X') is None
    assert skip_list.find('Key1') == 12
    assert skip_list.find('Key2') == 15

    skip_list.delete('Key1')
    assert skip_list.find('V') is None
    assert skip_list.find('X') is None
    assert skip_list.find('Key1') is None
    assert skip_list.find('Key2') == 15

    skip_list.delete('Key2')
    assert skip_list.find('V') is None
    assert skip_list.find('X') is None
    assert skip_list.find('Key1') is None
    assert skip_list.find('Key2') is None
def test_delete_doesnt_leave_dead_nodes():
    """After a delete, no unreachable (dead) node may remain linked anywhere."""
    skip_list = SkipList()

    skip_list.insert('Key1', 12)
    skip_list.insert('V', 13)
    skip_list.insert('X', 142)
    skip_list.insert('Key2', 15)

    skip_list.delete('X')

    def traverse_keys(node):
        # Walk every forward reference on every level, yielding keys.
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values():
    """Iteration must yield keys in sorted order after inserts and deletes."""
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def pytests():
    """Run every skip-list test repeatedly to smoke out probabilistic bugs."""
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()
def main():
    """Small demo: build a list with duplicate inserts, delete, and print it.

    >>> pytests()
    """
    skip_list = SkipList()
    skip_list.insert(2, '2')
    skip_list.insert(4, '4')
    skip_list.insert(6, '4')
    skip_list.insert(4, '5')
    skip_list.insert(8, '4')
    skip_list.insert(9, '4')

    skip_list.delete(4)

    print(skip_list)
if __name__ == "__main__":
    # Run module doctests, then the demo entry point.
    # NOTE(review): expects a module-level `main` — confirm the def above uses
    # that name.
    import doctest

    doctest.testmod()
    main()
| 220 | 0 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    """Build a tiny 3-row code dataset; the first two rows are near-duplicates."""
    data_dict = {
        '''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
        '''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
        '''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class __magic_name__ ( TestCase ):
    """Tests for MinHash-based near-duplicate clustering and deduplication."""

    def test_make_duplicate_clusters(self):
        """The two near-duplicate rows must land in one cluster of size 2."""
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        """Deduplication must keep 2 rows and tag the cluster metadata."""
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]['''copies'''], 2)
        self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''], True)
| 348 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure: submodule name -> public names it exports.
_import_structure = {
    'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_vivit'] = [
        'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VivitModel',
        'VivitPreTrainedModel',
        'VivitForVideoClassification',
    ]


if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 348 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class snake_case__ ( unittest.TestCase):
    """Tests for the framework-agnostic tensor helpers in ``transformers.utils``.

    Each helper (``flatten_dict``, ``transpose``, ``reshape``, ``squeeze``,
    ``expand_dims``) is checked against its NumPy counterpart, and — when the
    corresponding framework is installed — against PyTorch, TensorFlow and
    JAX/Flax tensors as well.

    NOTE(fix): the original methods were all named ``__lowercase`` (later
    definitions shadowed earlier ones, and unittest discovery never ran any of
    them because none started with ``test_``) and referenced the undefined
    name ``a__``. Methods are renamed and locals restored below.
    """

    def test_flatten_dict(self) -> None:
        # A nested config dict must flatten to dotted keys.
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_flat = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_flat)

    def test_transpose_numpy(self) -> None:
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self) -> None:
        # transpose() on a torch tensor must agree with the NumPy result.
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self) -> None:
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self) -> None:
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self) -> None:
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self) -> None:
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self) -> None:
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self) -> None:
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self) -> None:
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))
        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self) -> None:
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self) -> None:
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self) -> None:
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))
        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self) -> None:
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self) -> None:
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self) -> None:
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self) -> None:
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 291 |
import numpy as np
import qiskit
def UpperCamelCase ( key_len: int = 8, seed: int | None = None ) -> str:
    """Simulate the BB84 quantum key distribution protocol and return a key.

    FIX: the original signature declared BOTH parameters as ``snake_case__``
    (a SyntaxError — duplicate argument name), used that placeholder for every
    internal reference, and returned the never-assigned name ``key``.

    Args:
        key_len: Desired length of the generated binary key string.
        seed: Optional seed for both the classical RNG (basis/state choices)
            and the quantum simulator, making the run reproducible.

    Returns:
        A string of '0'/'1' characters of length ``key_len``. If fewer than
        ``key_len`` matching-basis bits were produced, the key is
        right-padded with '0'; if more, it is truncated.
    """
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits (0 = computational, 1 = Hadamard).
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum circuit to simulate BB84.
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits: X to encode a 1-state, H to switch basis.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits, rotating back with H where his
    # basis choice is 1.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        result_bit
        for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
        if alice_basis_bit == bob_basis_bit
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
if __name__ == "__main__":
    # FIX: the original called the undefined name ``bbaa``; the function
    # defined in this file is ``UpperCamelCase``.
    print(f'''The generated key is : {UpperCamelCase(8, seed=0)}''')
    from doctest import testmod

    testmod()
| 291 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.