code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
from math import sqrt
def snake_case__ ( __lowerCamelCase : int ):
    """Return True if ``__lowerCamelCase`` is prime, else False.

    Handles 2 and 3 directly, rejects values < 2, even numbers and multiples
    of 3, then tests only candidate divisors of the form 6k - 1 / 6k + 1 up
    to sqrt(n) (every prime > 3 has that form).
    """
    # the original body referenced ``number`` which was never bound; bind it
    # to the parameter explicitly
    number = __lowerCamelCase
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes > 3 are of the form 6k +/- 1
    for i in range(5 , int(sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def snake_case__ ( __lowerCamelCase : int = 10001 ):
    """Return the ``__lowerCamelCase``-th prime number (1-indexed).

    Project Euler #7 style solver: 2 and 3 are found by stepping one at a
    time, after which only odd candidates are tested.
    """

    def _is_prime(candidate: int) -> bool:
        # Local primality test (6k +/- 1 trial division).  Kept nested so the
        # block does not depend on the identically named, shadowed helper above.
        if 1 < candidate < 4:
            return True
        if candidate < 2 or candidate % 2 == 0 or candidate % 3 == 0:
            return False
        for divisor in range(5, int(sqrt(candidate) + 1), 6):
            if candidate % divisor == 0 or candidate % (divisor + 2) == 0:
                return False
        return True

    # restore the locals the original body read but never bound
    nth = __lowerCamelCase
    count = 0
    number = 1
    # find 2 and 3 by stepping one at a time
    while count != nth and number < 3:
        number += 1
        if _is_prime(number):
            count += 1
    # beyond 3 only odd numbers can be prime, so step by 2
    while count != nth:
        number += 2
        if _is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
    # ``solution`` does not exist in this file; the solver above is defined
    # under the (mangled) name ``snake_case__``, so call it under that name.
    print(f'{snake_case__() = }')
| 625 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
_a = ['onnx']
def __init__( self : List[str], *lowerCamelCase : Union[str, Any], **lowerCamelCase : str )-> Optional[int]:
requires_backends(self, ['''onnx'''] )
@classmethod
def snake_case ( cls : List[str], *lowerCamelCase : Any, **lowerCamelCase : Union[str, Any] )-> Optional[int]:
requires_backends(cls, ['''onnx'''] )
@classmethod
def snake_case ( cls : Union[str, Any], *lowerCamelCase : Tuple, **lowerCamelCase : Tuple )-> Optional[int]:
requires_backends(cls, ['''onnx'''] )
| 625 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
    """Builds a ``YolosConfig`` plus random pixel inputs (and optional object
    detection labels) for the YOLOS model tests below.

    NOTE(review): this class is damaged by mechanical identifier renaming.
    Every ``lowerCamelCase__ : T = value`` statement rebinds one throwaway
    local instead of assigning the ``self.*`` attribute that later code reads
    (``self.batch_size``, ``self.expected_seq_len``, ...), the ``__init__``
    parameter list repeats the name ``lowerCamelCase`` (a SyntaxError), and
    the right-hand sides reference names (``parent``, ``batch_size``, ...)
    that are never bound.  Restore from the upstream YOLOS test file rather
    than patching line by line.
    """

    def __init__( self : int, lowerCamelCase : Any, lowerCamelCase : Tuple=13, lowerCamelCase : str=[30, 30], lowerCamelCase : Optional[Any]=2, lowerCamelCase : Any=3, lowerCamelCase : int=True, lowerCamelCase : int=True, lowerCamelCase : Union[str, Any]=32, lowerCamelCase : Optional[Any]=5, lowerCamelCase : List[str]=4, lowerCamelCase : Any=37, lowerCamelCase : List[Any]="gelu", lowerCamelCase : int=0.1, lowerCamelCase : Dict=0.1, lowerCamelCase : List[Any]=10, lowerCamelCase : Tuple=0.02, lowerCamelCase : int=3, lowerCamelCase : Optional[Any]=None, lowerCamelCase : str=8, lowerCamelCase : Tuple=10, )-> Optional[int]:
        # NOTE(review): each line below was presumably ``self.<name> = <name>``
        # upstream -- the attribute names can be recovered from the RHS.
        lowerCamelCase__ : Union[str, Any] =parent
        lowerCamelCase__ : List[Any] =batch_size
        lowerCamelCase__ : Optional[Any] =image_size
        lowerCamelCase__ : str =patch_size
        lowerCamelCase__ : Union[str, Any] =num_channels
        lowerCamelCase__ : Optional[int] =is_training
        lowerCamelCase__ : Union[str, Any] =use_labels
        lowerCamelCase__ : Optional[int] =hidden_size
        lowerCamelCase__ : List[Any] =num_hidden_layers
        lowerCamelCase__ : Tuple =num_attention_heads
        lowerCamelCase__ : Tuple =intermediate_size
        lowerCamelCase__ : List[Any] =hidden_act
        lowerCamelCase__ : Tuple =hidden_dropout_prob
        lowerCamelCase__ : List[str] =attention_probs_dropout_prob
        lowerCamelCase__ : Any =type_sequence_label_size
        lowerCamelCase__ : Tuple =initializer_range
        lowerCamelCase__ : Optional[int] =num_labels
        lowerCamelCase__ : int =scope
        lowerCamelCase__ : Dict =n_targets
        lowerCamelCase__ : List[str] =num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        lowerCamelCase__ : Any =(image_size[1] // patch_size) * (image_size[0] // patch_size)
        lowerCamelCase__ : Union[str, Any] =num_patches + 1 + self.num_detection_tokens

    def snake_case ( self : List[str] )-> List[Any]:
        """Build (config, pixel_values, labels) for one test run."""
        lowerCamelCase__ : Any =floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
        lowerCamelCase__ : Any =None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            lowerCamelCase__ : Tuple =[]
            for i in range(self.batch_size ):
                lowerCamelCase__ : Tuple ={}
                # random class ids and normalised boxes for ``n_targets`` objects
                lowerCamelCase__ : int =torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=lowerCamelCase )
                lowerCamelCase__ : Dict =torch.rand(self.n_targets, 4, device=lowerCamelCase )
                labels.append(lowerCamelCase )
        lowerCamelCase__ : List[str] =self.get_config()
        return config, pixel_values, labels

    def snake_case ( self : Dict )-> int:
        """Assemble a ``YolosConfig`` from the tester's hyper-parameters."""
        return YolosConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels, )

    def snake_case ( self : int, lowerCamelCase : List[str], lowerCamelCase : Optional[int], lowerCamelCase : List[str] )-> Dict:
        """Run the bare ``YolosModel`` and check the hidden-state shape."""
        lowerCamelCase__ : List[Any] =YolosModel(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        lowerCamelCase__ : str =model(lowerCamelCase )
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size) )

    def snake_case ( self : Optional[int], lowerCamelCase : List[Any], lowerCamelCase : List[str], lowerCamelCase : Optional[Any] )-> List[Any]:
        """Run ``YolosForObjectDetection`` with and without labels and check
        logits / pred_boxes / loss shapes."""
        lowerCamelCase__ : List[Any] =YolosForObjectDetection(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        lowerCamelCase__ : str =model(pixel_values=lowerCamelCase )
        lowerCamelCase__ : Optional[int] =model(lowerCamelCase )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4) )
        lowerCamelCase__ : List[str] =model(pixel_values=lowerCamelCase, labels=lowerCamelCase )
        self.parent.assertEqual(result.loss.shape, () )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4) )

    def snake_case ( self : List[Any] )-> str:
        """Split prepare_config_and_inputs() into (config, inputs_dict)."""
        lowerCamelCase__ : int =self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str =config_and_inputs
        lowerCamelCase__ : Any ={'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    """Unit-test suite for ``YolosModel`` / ``YolosForObjectDetection``.

    NOTE(review): this class is damaged by mechanical renaming.  The base
    classes ``lowerCAmelCase_`` are undefined in this file (upstream these are
    ``ModelTesterMixin`` and ``PipelineTesterMixin``), every class attribute is
    assigned to the same name ``_a`` (upstream: ``all_model_classes``,
    ``pipeline_model_mapping`` and the ``test_*`` flags), and
    ``lowerCamelCase__ = ...`` statements rebind a throwaway local instead of
    the names later read (``model``, ``outputs``, ``attentions``, ...).
    Compare with the upstream YOLOS test file before relying on it.
    """

    # upstream: all_model_classes
    _a = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    # upstream: pipeline_model_mapping
    _a = (
        {'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
    )
    # upstream: test_pruning / test_resize_embeddings / test_head_masking /
    # test_attention_outputs style flags (exact mapping lost in the renaming)
    _a = False
    _a = False
    _a = False
    _a = False

    def snake_case ( self : Tuple, lowerCamelCase : List[Any], lowerCamelCase : Dict, lowerCamelCase : List[str]=False )-> Optional[Any]:
        """Add synthetic detection labels when ``return_labels`` is requested."""
        lowerCamelCase__ : Any =super()._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase )
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                lowerCamelCase__ : int =[]
                for i in range(self.model_tester.batch_size ):
                    lowerCamelCase__ : Union[str, Any] ={}
                    lowerCamelCase__ : List[str] =torch.ones(
                        size=(self.model_tester.n_targets,), device=lowerCamelCase, dtype=torch.long )
                    lowerCamelCase__ : str =torch.ones(
                        self.model_tester.n_targets, 4, device=lowerCamelCase, dtype=torch.float )
                    labels.append(lowerCamelCase )
                lowerCamelCase__ : List[Any] =labels
        return inputs_dict

    def snake_case ( self : Any )-> List[Any]:
        """setUp: build the model tester and the config tester."""
        lowerCamelCase__ : str =YolosModelTester(self )
        lowerCamelCase__ : Optional[int] =ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 )

    def snake_case ( self : Optional[int] )-> Optional[Any]:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def snake_case ( self : List[str] )-> Optional[Any]:
        # YOLOS does not use inputs_embeds
        pass

    def snake_case ( self : List[str] )-> Optional[Any]:
        """Input embeddings exist; output embeddings are absent or linear."""
        lowerCamelCase__ , lowerCamelCase__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Optional[int] =model_class(lowerCamelCase )
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
            lowerCamelCase__ : Optional[Any] =model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowerCamelCase, nn.Linear ) )

    def snake_case ( self : str )-> List[Any]:
        """``forward`` takes ``pixel_values`` as its first argument."""
        lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : List[str] =model_class(lowerCamelCase )
            lowerCamelCase__ : Dict =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Tuple =[*signature.parameters.keys()]
            lowerCamelCase__ : Any =['''pixel_values''']
            self.assertListEqual(arg_names[:1], lowerCamelCase )

    def snake_case ( self : List[str] )-> int:
        """Forward-pass shape check for the bare model."""
        lowerCamelCase__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase )

    def snake_case ( self : Dict )-> int:
        """Attention outputs: count, shape and position in the output tuple."""
        lowerCamelCase__ , lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Optional[int] =True
        # in YOLOS, the seq_len is different
        lowerCamelCase__ : List[str] =self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            lowerCamelCase__ : List[Any] =True
            lowerCamelCase__ : Any =False
            lowerCamelCase__ : Any =True
            lowerCamelCase__ : Optional[int] =model_class(lowerCamelCase )
            model.to(lowerCamelCase )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : Tuple =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
            lowerCamelCase__ : Dict =outputs.attentions
            self.assertEqual(len(lowerCamelCase ), self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            lowerCamelCase__ : List[Any] =True
            lowerCamelCase__ : List[Any] =model_class(lowerCamelCase )
            model.to(lowerCamelCase )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : Any =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
            lowerCamelCase__ : Union[str, Any] =outputs.attentions
            self.assertEqual(len(lowerCamelCase ), self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
            lowerCamelCase__ : Union[str, Any] =len(lowerCamelCase )
            # Check attention is always last and order is fine
            lowerCamelCase__ : List[Any] =True
            lowerCamelCase__ : Optional[Any] =True
            lowerCamelCase__ : List[str] =model_class(lowerCamelCase )
            model.to(lowerCamelCase )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : Optional[int] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
            lowerCamelCase__ : int =1
            self.assertEqual(out_len + added_hidden_states, len(lowerCamelCase ) )
            lowerCamelCase__ : Optional[Any] =outputs.attentions
            self.assertEqual(len(lowerCamelCase ), self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )

    def snake_case ( self : List[Any] )-> Union[str, Any]:
        """Hidden-state outputs: count and per-layer shape."""
        def check_hidden_states_output(lowerCamelCase : str, lowerCamelCase : Dict, lowerCamelCase : str ):
            lowerCamelCase__ : Optional[Any] =model_class(lowerCamelCase )
            model.to(lowerCamelCase )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : List[Any] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
            lowerCamelCase__ : int =outputs.hidden_states
            lowerCamelCase__ : Tuple =getattr(
                self.model_tester, '''expected_num_hidden_layers''', self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(lowerCamelCase ), lowerCamelCase )
            # YOLOS has a different seq_length
            lowerCamelCase__ : Optional[Any] =self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ), [seq_length, self.model_tester.hidden_size], )

        lowerCamelCase__ , lowerCamelCase__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Tuple =True
            check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__ : List[str] =True
            check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )

    def snake_case ( self : Union[str, Any] )-> str:
        """Forward-pass shape check for the object-detection head."""
        lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*lowerCamelCase )

    @slow
    def snake_case ( self : Optional[Any] )-> Dict:
        """Smoke-test loading the first published checkpoint from the hub."""
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : Any =YolosModel.from_pretrained(lowerCamelCase )
            self.assertIsNotNone(lowerCamelCase )
def snake_case__ ( ):
    """Load the standard COCO cats fixture image used by the YOLOS
    integration tests and return it as a PIL image."""
    # the original assigned the image to a throwaway local and returned the
    # unbound name ``image``; bind it explicitly
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration tests: run the real ``hustvl/yolos-small`` checkpoint
    on a COCO sample and compare logits, boxes and post-processed detections
    against known-good values.

    NOTE(review): as elsewhere in this file, ``lowerCamelCase__ = ...``
    rebinds a throwaway local instead of the names later read (``model``,
    ``image``, ``inputs``, the expected tensors, ...), and the bare
    ``lowerCamelCase`` used in ``.to(...)`` / ``device=...`` is undefined
    (upstream: ``torch_device``).  Restore from the upstream test file.
    """

    @cached_property
    def snake_case ( self : Tuple )-> Union[str, Any]:
        # image processor for the checkpoint; None when vision extras missing
        return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None

    @slow
    def snake_case ( self : Union[str, Any] )-> str:
        """End-to-end inference check for object detection."""
        lowerCamelCase__ : str =YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(lowerCamelCase )
        lowerCamelCase__ : Tuple =self.default_image_processor
        lowerCamelCase__ : List[str] =prepare_img()
        lowerCamelCase__ : Optional[Any] =image_processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : List[str] =model(inputs.pixel_values )
        # verify outputs
        lowerCamelCase__ : Tuple =torch.Size((1, 100, 92) )
        self.assertEqual(outputs.logits.shape, lowerCamelCase )
        lowerCamelCase__ : Optional[int] =torch.tensor(
            [[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]], device=lowerCamelCase, )
        lowerCamelCase__ : int =torch.tensor(
            [[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]], device=lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], lowerCamelCase, atol=1E-4 ) )
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], lowerCamelCase, atol=1E-4 ) )
        # verify postprocessing
        lowerCamelCase__ : Union[str, Any] =image_processor.post_process_object_detection(
            lowerCamelCase, threshold=0.3, target_sizes=[image.size[::-1]] )[0]
        lowerCamelCase__ : Optional[Any] =torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(lowerCamelCase )
        lowerCamelCase__ : Tuple =[75, 75, 17, 63, 17]
        lowerCamelCase__ : Tuple =torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(lowerCamelCase )
        self.assertEqual(len(results['''scores'''] ), 5 )
        self.assertTrue(torch.allclose(results['''scores'''], lowerCamelCase, atol=1E-4 ) )
        self.assertSequenceEqual(results['''labels'''].tolist(), lowerCamelCase )
        self.assertTrue(torch.allclose(results['''boxes'''][0, :], lowerCamelCase ) )
| 625 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def snake_case__ ( x : float , y : float , max_step : int ):
    """Return the normalised Mandelbrot escape "distance" for the point (x, y).

    Iterates z -> z**2 + c with c = x + y*i, tracking z as the real pair
    (a, b), for at most ``max_step`` steps.  Returns step / (max_step - 1):
    values close to 1 mean the point stayed bounded (inside the set).
    Assumes ``max_step >= 2`` (the original made the same assumption).
    The original parameter list reused one name three times (a SyntaxError),
    so the parameters were given the names the body reads.
    """
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 2 (checked as |z|**2 > 4 to avoid a sqrt)
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def snake_case__ ( __lowerCamelCase : float ):
    """Black-and-white colouring: points inside the Mandelbrot set
    (normalised distance == 1) are black, everything else white."""
    # the original body read the unbound name ``distance``
    distance = __lowerCamelCase
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def snake_case__ ( __lowerCamelCase : float ):
    """Colour-coded colouring: points inside the set (distance == 1) are
    black; otherwise the distance is mapped to a hue via HSV -> RGB."""
    # the original body read the unbound name ``distance``
    distance = __lowerCamelCase
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def snake_case__ ( image_width : int = 800 , image_height : int = 600 , figure_center_x : float = -0.6 , figure_center_y : float = 0 , figure_width : float = 3.2 , max_step : int = 50 , use_distance_color_coding : bool = True , ):
    """Render the Mandelbrot set into a new PIL RGB image and return it.

    Maps each pixel to figure coordinates around (figure_center_x,
    figure_center_y), computes the escape distance, and colours the pixel
    either with the HSV colour coding or in black and white.

    The original parameter list reused one name seven times (a SyntaxError);
    parameters were renamed to the names the body reads.
    NOTE(review): ``get_distance`` / ``get_color_coded_rgb`` /
    ``get_black_and_white_rgb`` are defined above in this file only under the
    mangled name ``snake_case__`` -- restore those helper names for this to run.
    """
    img = Image.new('''RGB''' , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # colored version, full figure
    # NOTE(review): the renderer above is defined under the mangled name
    # ``snake_case__`` in this file, so it is called under that name here.
    img = snake_case__()
    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)
    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)
    # uncomment to save the image
    # img.save("mandelbrot.png")
    img.show()
| 625 | 1 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : int ):
    """Count the distinct ways to climb a staircase of ``__lowerCamelCase``
    steps taking 1 or 2 steps at a time (the Fibonacci recurrence).

    Raises AssertionError when the input is not a positive integer, matching
    the original contract.
    """
    # the original asserted ``isinstance(x, x)`` and read unbound names;
    # bind the parameter and check against ``int`` as clearly intended
    number_of_steps = __lowerCamelCase
    assert (
        isinstance(number_of_steps , int ) and number_of_steps > 0
    ), f'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
    if number_of_steps == 1:
        return 1
    current , previous = 1, 1
    # each step extends the sequence: ways(n) = ways(n-1) + ways(n-2)
    for _ in range(number_of_steps - 1 ):
        current , previous = current + previous, current
    return current
if __name__ == "__main__":
    # When executed as a script, run the doctests embedded in this module.
    from doctest import testmod

    testmod()
| 625 |
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def snake_case__ ( __lowerCamelCase : Optional[Any] ):
    """Create a ``VideoMAEConfig`` matching the checkpoint ``model_name``.

    For fine-tuned checkpoints the label mappings (Kinetics-400 or
    Something-Something-v2) are downloaded from the hub and attached to the
    config; pre-training checkpoints disable mean pooling.
    Restored the locals the original body read but never bound
    (``model_name``, ``config``, ``repo_id``, ``filename``, ``idalabel``).
    """
    model_name = __lowerCamelCase
    config = VideoMAEConfig()
    # NOTE(review): the architecture helper below is defined under the mangled
    # name ``snake_case__`` in this file; keeping the intended call name here.
    set_architecture_configs(model_name , config )
    if "finetuned" not in model_name:
        # pre-training checkpoints reconstruct pixels, no pooled representation
        config.use_mean_pooling = False
    if "finetuned" in model_name:
        repo_id = '''huggingface/label-files'''
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = '''kinetics400-id2label.json'''
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = '''something-something-v2-id2label.json'''
        else:
            raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' )
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        # JSON keys arrive as strings; the config expects int ids
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def snake_case__ ( model_name , config ):
    """Fill in the encoder/decoder dimensions of ``config`` from the size tag
    ("small" / "base" / "large" / "huge") embedded in ``model_name``.

    "base" keeps the ``VideoMAEConfig`` defaults; any other name without a
    recognised size tag raises ValueError.  The original parameter list
    reused one name twice (a SyntaxError) and assigned every value to a
    throwaway local; the attribute names follow the upstream VideoMAE
    conversion script (hidden/intermediate size, layer and head counts for
    encoder and decoder).
    """
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('''Model name should include either "small", "base", "large", or "huge"''' )
def snake_case__ ( __lowerCamelCase : Any ):
"""simple docstring"""
if "encoder." in name:
lowerCamelCase__ : Optional[int] =name.replace('''encoder.''' , '''''' )
if "cls_token" in name:
lowerCamelCase__ : List[Any] =name.replace('''cls_token''' , '''videomae.embeddings.cls_token''' )
if "decoder_pos_embed" in name:
lowerCamelCase__ : Tuple =name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
lowerCamelCase__ : Any =name.replace('''pos_embed''' , '''videomae.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
lowerCamelCase__ : Optional[Any] =name.replace('''patch_embed.proj''' , '''videomae.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCamelCase__ : List[Any] =name.replace('''patch_embed.norm''' , '''videomae.embeddings.norm''' )
if "decoder.blocks" in name:
lowerCamelCase__ : Tuple =name.replace('''decoder.blocks''' , '''decoder.decoder_layers''' )
if "blocks" in name:
lowerCamelCase__ : Dict =name.replace('''blocks''' , '''videomae.encoder.layer''' )
if "attn.proj" in name:
lowerCamelCase__ : Union[str, Any] =name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "bias" not in name:
lowerCamelCase__ : List[str] =name.replace('''attn''' , '''attention.self''' )
if "attn" in name:
lowerCamelCase__ : Union[str, Any] =name.replace('''attn''' , '''attention.attention''' )
if "norm1" in name:
lowerCamelCase__ : Tuple =name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowerCamelCase__ : Optional[int] =name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowerCamelCase__ : List[Any] =name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowerCamelCase__ : int =name.replace('''mlp.fc2''' , '''output.dense''' )
if "decoder_embed" in name:
lowerCamelCase__ : Any =name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
if "decoder_norm" in name:
lowerCamelCase__ : Optional[Any] =name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
if "decoder_pred" in name:
lowerCamelCase__ : Any =name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
lowerCamelCase__ : str =name.replace('''norm.weight''' , '''videomae.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
lowerCamelCase__ : Optional[int] =name.replace('''norm.bias''' , '''videomae.layernorm.bias''' )
if "head" in name and "decoder" not in name:
lowerCamelCase__ : List[str] =name.replace('''head''' , '''classifier''' )
return name
def snake_case__ ( orig_state_dict , config ):
    """Rework a raw VideoMAE state dict in place: split each fused ``qkv``
    weight into separate query/key/value tensors under the HuggingFace key
    layout, and strip ``encoder.`` prefixes.

    The original parameter list reused one name twice (a SyntaxError) and
    every assignment target was lost; the q/k/v split below follows the
    upstream conversion script (rows [:dim], [dim:2*dim], [-dim:]).
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key.startswith('''encoder.''' ):
            key = key.replace('''encoder.''' , '''''' )
        if "qkv" in key:
            key_split = key.split('''.''' )
            if key.startswith('''decoder.blocks''' ):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2] )
                prefix = '''decoder.decoder_layers.'''
                if "weight" in key:
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.key.weight'''] = val[dim : dim * 2, :]
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1] )
                prefix = '''videomae.encoder.layer.'''
                if "weight" in key:
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.key.weight'''] = val[dim : dim * 2, :]
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
        else:
            # NOTE(review): upstream renames the key here via the rename helper
            # (defined above under the mangled name ``snake_case__``); the key
            # is kept unchanged to stay self-contained -- TODO restore the call
            # once the helper has its real name back.
            orig_state_dict[key] = val
    return orig_state_dict
def snake_case__ ( ):
    """Download the 'eating spaghetti' demo clip (a NumPy archive of frames)
    from the hub and return it as a list of frames."""
    # the original passed the unbound name ``__lowerCamelCase`` to np.load;
    # bind the downloaded path explicitly
    file_path = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
    video = np.load(file_path )
    return list(video )
def snake_case__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ):
    """Convert an original VideoMAE checkpoint (Google Drive) to the
    HuggingFace format, verify logits (and loss for ``videomae-base-short``)
    against known-good values, then optionally save and push to the hub.

    NOTE(review): this definition is damaged by mechanical renaming.  All
    four parameters share one name (a SyntaxError; upstream:
    ``checkpoint_url``, ``pytorch_dump_folder_path``, ``model_name``,
    ``push_to_hub``), and the ``lowerCamelCase__ = ...`` statements rebind a
    throwaway local instead of the variables later lines read (``config``,
    ``model``, ``files``, ``state_dict``, ``expected_shape``,
    ``expected_slice``, ...).  Restore from the upstream
    ``convert_videomae_to_pytorch.py`` rather than patching piecemeal.
    """
    lowerCamelCase__ : str =get_videomae_config(__lowerCamelCase )
    if "finetuned" in model_name:
        lowerCamelCase__ : Tuple =VideoMAEForVideoClassification(__lowerCamelCase )
    else:
        lowerCamelCase__ : int =VideoMAEForPreTraining(__lowerCamelCase )
    # download original checkpoint, hosted on Google Drive
    lowerCamelCase__ : Union[str, Any] ='''pytorch_model.bin'''
    gdown.cached_download(__lowerCamelCase , __lowerCamelCase , quiet=__lowerCamelCase )
    lowerCamelCase__ : Optional[Any] =torch.load(__lowerCamelCase , map_location='''cpu''' )
    # some checkpoints nest the weights under "model", others under "module"
    if "model" in files:
        lowerCamelCase__ : Dict =files['''model''']
    else:
        lowerCamelCase__ : str =files['''module''']
    lowerCamelCase__ : Optional[Any] =convert_state_dict(__lowerCamelCase , __lowerCamelCase )
    model.load_state_dict(__lowerCamelCase )
    model.eval()
    # verify model on basic input
    lowerCamelCase__ : Dict =VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
    lowerCamelCase__ : int =prepare_video()
    lowerCamelCase__ : Tuple =image_processor(__lowerCamelCase , return_tensors='''pt''' )
    if "finetuned" not in model_name:
        # pre-training checkpoints additionally need a boolean mask of patches
        lowerCamelCase__ : Tuple =hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
        lowerCamelCase__ : Union[str, Any] =torch.load(__lowerCamelCase )
    lowerCamelCase__ : int =model(**__lowerCamelCase )
    lowerCamelCase__ : Dict =outputs.logits
    # the list of checkpoint names this converter supports
    lowerCamelCase__ : List[str] =[
        '''videomae-small-finetuned-kinetics''',
        '''videomae-small-finetuned-ssv2''',
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        '''videomae-base-short''',
        '''videomae-base-short-finetuned-kinetics''',
        '''videomae-base''',
        '''videomae-base-finetuned-kinetics''',
        '''videomae-large''',
        '''videomae-large-finetuned-kinetics''',
        '''videomae-huge-finetuned-kinetics''',
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        '''videomae-base-short-ssv2''',
        '''videomae-base-short-finetuned-ssv2''',
        '''videomae-base-ssv2''',
        '''videomae-base-finetuned-ssv2''',
    ]
    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        lowerCamelCase__ : Union[str, Any] =torch.Size([1, 400] )
        lowerCamelCase__ : str =torch.tensor([-0.92_91, -0.40_61, -0.93_07] )
    elif model_name == "videomae-small-finetuned-ssv2":
        lowerCamelCase__ : int =torch.Size([1, 174] )
        lowerCamelCase__ : Dict =torch.tensor([0.26_71, -0.46_89, -0.82_35] )
    elif model_name == "videomae-base":
        lowerCamelCase__ : List[str] =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : Dict =torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] )
    elif model_name == "videomae-base-short":
        lowerCamelCase__ : List[Any] =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : List[str] =torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] )
        # we verified the loss both for normalized and unnormalized targets for this one
        lowerCamelCase__ : str =torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] )
    elif model_name == "videomae-large":
        lowerCamelCase__ : Union[str, Any] =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : List[Any] =torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] )
    elif model_name == "videomae-large-finetuned-kinetics":
        lowerCamelCase__ : Any =torch.Size([1, 400] )
        lowerCamelCase__ : str =torch.tensor([0.07_71, 0.00_11, -0.36_25] )
    elif model_name == "videomae-huge-finetuned-kinetics":
        lowerCamelCase__ : Any =torch.Size([1, 400] )
        lowerCamelCase__ : Optional[int] =torch.tensor([0.24_33, 0.16_32, -0.48_94] )
    elif model_name == "videomae-base-short-finetuned-kinetics":
        lowerCamelCase__ : List[str] =torch.Size([1, 400] )
        lowerCamelCase__ : Dict =torch.tensor([0.65_88, 0.09_90, -0.24_93] )
    elif model_name == "videomae-base-finetuned-kinetics":
        lowerCamelCase__ : str =torch.Size([1, 400] )
        lowerCamelCase__ : Any =torch.tensor([0.36_69, -0.06_88, -0.24_21] )
    elif model_name == "videomae-base-short-ssv2":
        lowerCamelCase__ : Tuple =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : Dict =torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] )
    elif model_name == "videomae-base-short-finetuned-ssv2":
        lowerCamelCase__ : Optional[int] =torch.Size([1, 174] )
        lowerCamelCase__ : Any =torch.tensor([-0.05_37, -0.15_39, -0.32_66] )
    elif model_name == "videomae-base-ssv2":
        lowerCamelCase__ : Dict =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : str =torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] )
    elif model_name == "videomae-base-finetuned-ssv2":
        lowerCamelCase__ : str =torch.Size([1, 174] )
        lowerCamelCase__ : int =torch.tensor([0.19_61, -0.83_37, -0.63_89] )
    else:
        raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1e-4 )
    else:
        print('''Logits:''' , logits[0, :3, :3] )
        assert torch.allclose(logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 )
    print('''Logits ok!''' )
    # verify loss, if applicable
    if model_name == "videomae-base-short":
        lowerCamelCase__ : str =outputs.loss
        assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-4 )
        print('''Loss ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(__lowerCamelCase )
        model.save_pretrained(__lowerCamelCase )
    if push_to_hub:
        print('''Pushing to the hub...''' )
        model.push_to_hub(__lowerCamelCase , organization='''nielsr''' )
if __name__ == "__main__":
    # the original bound the parser and the parsed args to a throwaway name
    # (``_lowercase``) and then read the unbound names ``parser`` / ``args``
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    # NOTE(review): the converter above is defined under the mangled name
    # ``snake_case__``; upstream it is ``convert_videomae_checkpoint``.
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 625 | 1 |
"""simple docstring"""
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger and the map of pretrained model ids to hosted config files.
# NOTE(review): both values are bound to the same name `_lowercase`, so the
# second assignment clobbers the logger — an automated-rename artifact
# (originally distinct names such as `logger` and an archive map); confirm
# before relying on either binding.
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Tuple = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    """Configuration for an Autoformer time-series forecasting model.

    Holds the time-series-specific settings (prediction/context lengths,
    distribution head, lags, static/dynamic feature counts), the encoder/decoder
    Transformer hyper-parameters, and the Autoformer-specific decomposition
    settings (``label_length``, ``moving_average``, ``autocorrelation_factor``).

    NOTE(review): the original file declared every ``__init__`` parameter as
    ``lowerCamelCase`` (a duplicate-argument SyntaxError) and the base class as
    the undefined ``lowerCAmelCase_``. Parameter names below are reconstructed
    from the right-hand sides of the original assignments, which read exactly
    these names; defaults are preserved unchanged, and the base class is the
    ``PretrainedConfig`` imported at the top of the file.
    """

    model_type = 'autoformer'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],  # noqa: B006 — mutable default kept for API compatibility
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        # Default the encoder context window to the prediction horizon.
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
            self.embedding_dimension = embedding_dimension
        else:
            # Heuristic default: roughly half the cardinality, capped at 50.
            self.embedding_dimension = [min(50, (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        # Model input width: one value per lag plus the per-step feature vector.
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs )

    @property
    def _number_of_features(self) -> int:
        # Width of the per-time-step feature vector appended to the lagged values.
        # (Read by __init__ above; the original mangled name `snake_case` made
        # that read an AttributeError.)
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 625 |
"""simple docstring"""
# The three colours of the Dutch national flag, in sorted order. The sorter
# below reads `colors`; the original (mangled) module bound all of these to
# the throwaway name `_lowercase` and referenced undefined `red/white/blue`.
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def snake_case__ ( sequence : list ) -> list:
    """Sort a list containing only the values 0, 1 and 2 in a single pass.

    Implements the Dutch-national-flag three-way partition: `low` marks the end
    of the 0-region, `high` the start of the 2-region, and `mid` scans between
    them, swapping elements into place. Sorts in place and returns the list.

    Args:
        sequence: list whose elements must all be in ``colors`` (0, 1 or 2).

    Returns:
        The same list, sorted ascending ( [] for empty input ).

    Raises:
        ValueError: if an element outside ``colors`` is encountered.
    """
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            # Move the 0 into the low region. (The original mangled code
            # assigned the pair to throwaway locals, so no swap ever happened.)
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            # Move the 2 into the high region; do not advance mid — the element
            # swapped in from `high` has not been examined yet.
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'''The elements inside the sequence must contains only {colors} values'''
            # Original raised ValueError(sequence) — raise the message instead.
            raise ValueError(msg )
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    # Original called the undefined name `dutch_national_flag_sort`.
    print(f'{snake_case__(unsorted)}')
| 625 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
# Module-level logger and the tf.data autotune constant.
# NOTE(review): both are bound to the same name `_lowercase`, so the second
# assignment clobbers the logger — an automated-rename artifact (originally
# distinct names, e.g. `logger` and `AUTO`); confirm before use.
_lowercase : Any = logging.getLogger(__name__)
_lowercase : int = tf.data.AUTOTUNE
def snake_case__ ( ):
    """Build and parse the command-line arguments for TPU masked-LM training.

    Covers model/tokenizer selection, TPU connection options, dataset paths
    and optimization hyper-parameters.

    Returns:
        argparse.Namespace with the parsed arguments.

    NOTE(review): the original (mangled) code passed the undefined name
    ``__lowerCamelCase`` as every ``type=`` / ``required=`` value; the proper
    ``str`` / ``int`` / ``float`` / ``True`` values below are restored from
    each option's default and help text.
    """
    parser = argparse.ArgumentParser(description='''Train a masked language model on TPU.''' )
    # Model / tokenizer selection (only the config is copied — weights are fresh).
    parser.add_argument(
        '''--pretrained_model_config''' , type=str , default='''roberta-base''' , help='''The model config to use. Note that we don\'t copy the model\'s weights, only the config!''' , )
    parser.add_argument(
        '''--tokenizer''' , type=str , default='''unigram-tokenizer-wikitext''' , help='''The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.''' , )
    parser.add_argument(
        '''--per_replica_batch_size''' , type=int , default=8 , help='''Batch size per TPU core.''' , )
    # TPU connection options.
    parser.add_argument(
        '''--no_tpu''' , action='''store_true''' , help='''If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.''' , )
    parser.add_argument(
        '''--tpu_name''' , type=str , help='''Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.''' , default='''local''' , )
    parser.add_argument(
        '''--tpu_zone''' , type=str , help='''Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.''' , )
    parser.add_argument(
        '''--gcp_project''' , type=str , help='''Google cloud project name. Only used for non-Colab TPU nodes.''' )
    parser.add_argument(
        '''--bfloat16''' , action='''store_true''' , help='''Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.''' , )
    # Dataset locations and shuffling.
    parser.add_argument(
        '''--train_dataset''' , type=str , help='''Path to training dataset to load. If the path begins with `gs://`'''
        ''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
    parser.add_argument(
        '''--shuffle_buffer_size''' , type=int , default=2**18 , help='''Size of the shuffle buffer (in samples)''' , )
    parser.add_argument(
        '''--eval_dataset''' , type=str , help='''Path to evaluation dataset to load. If the path begins with `gs://`'''
        ''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
    # Optimization hyper-parameters.
    parser.add_argument(
        '''--num_epochs''' , type=int , default=1 , help='''Number of epochs to train for.''' , )
    parser.add_argument(
        '''--learning_rate''' , type=float , default=1e-4 , help='''Learning rate to use for training.''' , )
    parser.add_argument(
        '''--weight_decay_rate''' , type=float , default=1e-3 , help='''Weight decay rate to use for training.''' , )
    parser.add_argument(
        '''--max_length''' , type=int , default=512 , help='''Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py''' , )
    parser.add_argument(
        '''--mlm_probability''' , type=float , default=0.15 , help='''Fraction of tokens to mask during training.''' , )
    parser.add_argument('''--output_dir''' , type=str , required=True , help='''Path to save model checkpoints to.''' )
    parser.add_argument('''--hub_model_id''' , type=str , help='''Model ID to upload to on the Hugging Face Hub.''' )
    args = parser.parse_args()
    return args
def snake_case__ ( args ):
    """Resolve, connect to and initialize the TPU described by the CLI args.

    Args:
        args: argparse.Namespace exposing ``tpu_name``, ``tpu_zone`` and
            ``gcp_project`` (see the argument parser above).

    Returns:
        The initialized ``tf.distribute.cluster_resolver.TPUClusterResolver``.

    Raises:
        RuntimeError: if no TPU could be resolved from the given arguments.

    NOTE(review): the original (mangled) parameter was named
    ``__lowerCamelCase`` while the body read ``args``, and the connect/init
    calls were passed the parameter instead of the resolver — both restored.
    """
    try:
        if args.tpu_name:
            # Explicitly named TPU resource ('local' on TPU VMs).
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
        else:
            # Auto-discovery (works on Colab).
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            '''Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '''
            '''--gcp_project. When running on a TPU VM, use --tpu_name local.''' )
    tf.config.experimental_connect_to_cluster(tpu )
    tf.tpu.experimental.initialize_tpu_system(tpu )
    return tpu
def snake_case__ ( file_list ):
    """Sum the per-shard sample counts encoded in tfrecord shard filenames.

    Shards are expected to be named ``<prefix>-<shard>-<count>.tfrecord``; the
    trailing integer is the number of samples stored in that shard.

    Args:
        file_list: iterable of tfrecord file paths (plain or ``gs://`` paths).

    Returns:
        Total number of samples across all shards (0 for an empty list).

    NOTE(review): the original (mangled) body iterated the undefined name
    ``file_list`` while the parameter was ``__lowerCamelCase``, and ran the
    regex over the whole list instead of the basename — both restored.
    """
    num_samples = 0
    for file in file_list:
        # Only the basename carries the "-<shard>-<count>.tfrecord" suffix.
        filename = file.split('''/''' )[-1]
        sample_count = re.search(R'''-\d+-(\d+)\.tfrecord''' , filename ).group(1 )
        num_samples += int(sample_count )
    return num_samples
def snake_case__ ( records , decode_fn , mask_fn , batch_size , shuffle , shuffle_buffer_size=None ):
    """Build the batched, masked tf.data pipeline from a list of tfrecord shards.

    Args:
        records: list of tfrecord shard paths.
        decode_fn: per-example parsing function (tf.Example -> tensors).
        mask_fn: per-batch MLM masking function.
        batch_size: global batch size.
        shuffle: whether to shuffle shard order and samples.
        shuffle_buffer_size: sample shuffle buffer size; required when
            ``shuffle`` is true.

    Returns:
        A prefetching ``tf.data.Dataset`` yielding masked, batched examples.

    NOTE(review): the original (mangled) signature declared all six parameters
    as ``__lowerCamelCase`` (a SyntaxError); names are restored from the body's
    reads and the assert below. ``tf.data.AUTOTUNE`` is used directly where the
    original passed an unresolvable placeholder.
    """
    num_samples = count_samples(records )  # NOTE(review): sibling helper is named `snake_case__` in this (mangled) file
    dataset = tf.data.Dataset.from_tensor_slices(records )
    if shuffle:
        # Shuffle shard order before interleaving reads.
        dataset = dataset.shuffle(len(records ) )
    dataset = tf.data.TFRecordDataset(dataset , num_parallel_reads=tf.data.AUTOTUNE )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
    dataset = dataset.map(decode_fn , num_parallel_calls=tf.data.AUTOTUNE )
    if shuffle:
        assert shuffle_buffer_size is not None
        # NOTE(review): reads the module-global `args` (set in the __main__
        # block when run as a script) rather than the parameter — kept as-is.
        dataset = dataset.shuffle(args.shuffle_buffer_size )
    dataset = dataset.batch(batch_size , drop_remainder=True )
    dataset = dataset.map(mask_fn , num_parallel_calls=tf.data.AUTOTUNE )
    dataset = dataset.prefetch(tf.data.AUTOTUNE )
    return dataset
def snake_case__ ( __lowerCamelCase : Any ):
    """Train a masked language model on TPU (or CPU/GPU with --no_tpu).

    Sets up the distribution strategy, builds a fresh model from the chosen
    config, constructs train/eval tf.data pipelines from tfrecord shards,
    fits the model and saves it to ``args.output_dir``.

    NOTE(review): this body is full of automated-rename artifacts — it reads
    ``args`` although the parameter is ``__lowerCamelCase``, reads
    ``args.bfloataa`` (presumably ``args.bfloat16`` from the parser above),
    reads locals (``training_records``, ``eval_records``, ``strategy``,
    ``model``, ``tokenizer`` …) whose assignments were renamed to
    ``lowerCamelCase__``, and calls ``initialize_tpu`` / ``count_samples`` /
    ``prepare_dataset``, which are all defined as ``snake_case__`` in this
    file. As written it raises NameError; confirm against the original script
    before restoring. Code below is kept byte-identical.
    """
    # Choose the distribution strategy: TPU by default, single GPU with --no_tpu.
    if not args.no_tpu:
        lowerCamelCase__ : str =initialize_tpu(__lowerCamelCase )
        lowerCamelCase__ : str =tf.distribute.TPUStrategy(__lowerCamelCase )
    else:
        lowerCamelCase__ : List[str] =tf.distribute.OneDeviceStrategy(device='''/gpu:0''' )
    if args.bfloataa:
        tf.keras.mixed_precision.set_global_policy('''mixed_bfloat16''' )
    # Tokenizer supplies the vocab size; only the config (not weights) is reused.
    lowerCamelCase__ : Tuple =AutoTokenizer.from_pretrained(args.tokenizer )
    lowerCamelCase__ : Any =AutoConfig.from_pretrained(args.pretrained_model_config )
    lowerCamelCase__ : Tuple =tokenizer.vocab_size
    # Collect tfrecord shards for train and eval; fail fast if none are found.
    lowerCamelCase__ : Any =tf.io.gfile.glob(os.path.join(args.train_dataset , '''*.tfrecord''' ) )
    if not training_records:
        raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''' )
    lowerCamelCase__ : int =tf.io.gfile.glob(os.path.join(args.eval_dataset , '''*.tfrecord''' ) )
    if not eval_records:
        raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''' )
    # Derive the training schedule from the total sample count.
    lowerCamelCase__ : Optional[int] =count_samples(__lowerCamelCase )
    lowerCamelCase__ : Optional[int] =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    lowerCamelCase__ : Any =steps_per_epoch * args.num_epochs
    with strategy.scope():
        lowerCamelCase__ : Union[str, Any] =TFAutoModelForMaskedLM.from_config(__lowerCamelCase )
        model(model.dummy_inputs )  # Pass some dummy inputs through the model to ensure all the weights are built
        lowerCamelCase__ , lowerCamelCase__ : Dict =create_optimizer(
            num_train_steps=__lowerCamelCase , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=__lowerCamelCase , metrics=['''accuracy'''] )
    # Parse a serialized tf.Example into fixed-length input_ids / attention_mask.
    def decode_fn(__lowerCamelCase : Any ):
        lowerCamelCase__ : Optional[Any] ={
            '''input_ids''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
            '''attention_mask''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(__lowerCamelCase , __lowerCamelCase )
    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    lowerCamelCase__ : Optional[int] =DataCollatorForLanguageModeling(
        tokenizer=__lowerCamelCase , mlm_probability=args.mlm_probability , mlm=__lowerCamelCase , return_tensors='''tf''' )
    # Apply MLM masking per batch, excluding padding and special tokens.
    def mask_with_collator(__lowerCamelCase : Any ):
        # TF really needs an isin() function
        lowerCamelCase__ : Union[str, Any] =(
            ~tf.cast(batch['''attention_mask'''] , tf.bool )
            | (batch['''input_ids'''] == tokenizer.cls_token_id)
            | (batch['''input_ids'''] == tokenizer.sep_token_id)
        )
        lowerCamelCase__ , lowerCamelCase__ : List[Any] =data_collator.tf_mask_tokens(
            batch['''input_ids'''] , vocab_size=len(__lowerCamelCase ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=__lowerCamelCase , )
        return batch
    # Build the train/eval pipelines and optional hub-push callback, then fit.
    lowerCamelCase__ : Any =args.per_replica_batch_size * strategy.num_replicas_in_sync
    lowerCamelCase__ : Optional[Any] =prepare_dataset(
        __lowerCamelCase , decode_fn=__lowerCamelCase , mask_fn=__lowerCamelCase , batch_size=__lowerCamelCase , shuffle=__lowerCamelCase , shuffle_buffer_size=args.shuffle_buffer_size , )
    lowerCamelCase__ : Any =prepare_dataset(
        __lowerCamelCase , decode_fn=__lowerCamelCase , mask_fn=__lowerCamelCase , batch_size=__lowerCamelCase , shuffle=__lowerCamelCase , )
    lowerCamelCase__ : Any =[]
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=__lowerCamelCase ) )
    model.fit(
        __lowerCamelCase , validation_data=__lowerCamelCase , epochs=args.num_epochs , callbacks=__lowerCamelCase , )
    model.save_pretrained(args.output_dir )
if __name__ == "__main__":
    # Script entry point: parse CLI args and launch training.
    # NOTE(review): `parse_args` and `main` are not defined under those names in
    # this file (the defs above are all called `snake_case__`), and `args` is
    # never bound (the result goes to `_lowercase`) — this guard raises
    # NameError as written; likely an automated-rename artifact.
    _lowercase : List[Any] = parse_args()
    main(args)
| 625 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
# Helper imported from diffusers.utils.testing_utils above; per its name it
# presumably makes the test run deterministic (seeding/config) — see that
# helper for exactly what it sets.
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    """Fast (CPU-sized) tests for the StableUnCLIP image-to-image pipeline.

    Builds a miniature pipeline (tiny CLIP encoders, 32x32 UNet/VAE) and checks
    the end-to-end forward pass plus slicing/batching/xformers consistency.

    NOTE(review): this file is machine-renamed — the mixin bases `lowerCAmelCase_`
    are never defined, every class attribute is rebound to `_a` (so only the last
    assignment survives; originally distinct mixin attributes such as the
    pipeline class and parameter sets), and all methods share the name
    `snake_case`, so later defs shadow earlier ones. Confirm against the
    original diffusers test module before running.
    """
    _a = StableUnCLIPImgaImgPipeline
    _a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    _a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    _a = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    _a = frozenset([] )
    def snake_case ( self : List[str] )-> str:
        # Build the dict of tiny pipeline components used by every test.
        # NOTE(review): `embedder_hidden_size` / `embedder_projection_dim` are
        # read below but never bound (the assignments above them go to
        # `lowerCamelCase__`) — rename artifact; both appear to have been 32.
        lowerCamelCase__ : Dict =32
        lowerCamelCase__ : Optional[Any] =embedder_hidden_size
        # image encoding components
        lowerCamelCase__ : Dict =CLIPImageProcessor(crop_size=32, size=32 )
        torch.manual_seed(0 )
        lowerCamelCase__ : List[Any] =CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=lowerCamelCase, projection_dim=lowerCamelCase, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) )
        # regular denoising components
        torch.manual_seed(0 )
        lowerCamelCase__ : Optional[int] =StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
        lowerCamelCase__ : Dict =DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
        torch.manual_seed(0 )
        lowerCamelCase__ : Optional[int] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        torch.manual_seed(0 )
        lowerCamelCase__ : Tuple =CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=lowerCamelCase, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
        torch.manual_seed(0 )
        lowerCamelCase__ : Dict =UNetaDConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D'''), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='''projection''', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=lowerCamelCase, layers_per_block=1, upcast_attention=lowerCamelCase, use_linear_projection=lowerCamelCase, )
        torch.manual_seed(0 )
        lowerCamelCase__ : Union[str, Any] =DDIMScheduler(
            beta_schedule='''scaled_linear''', beta_start=0.00_085, beta_end=0.012, prediction_type='''v_prediction''', set_alpha_to_one=lowerCamelCase, steps_offset=1, )
        torch.manual_seed(0 )
        lowerCamelCase__ : Optional[int] =AutoencoderKL()
        lowerCamelCase__ : int ={
            # image encoding components
            '''feature_extractor''': feature_extractor,
            '''image_encoder''': image_encoder.eval(),
            # image noising components
            '''image_normalizer''': image_normalizer.eval(),
            '''image_noising_scheduler''': image_noising_scheduler,
            # regular denoising components
            '''tokenizer''': tokenizer,
            '''text_encoder''': text_encoder.eval(),
            '''unet''': unet.eval(),
            '''scheduler''': scheduler,
            '''vae''': vae.eval(),
        }
        return components
    def snake_case ( self : str, lowerCamelCase : Dict, lowerCamelCase : Any=0, lowerCamelCase : str=True )-> List[str]:
        # Build the per-test call kwargs (prompt, image, generator).
        # NOTE(review): the three parameters share the name `lowerCamelCase`,
        # which is a SyntaxError in Python — likely originally
        # (device, seed=0, pil_image=True); `pil_image` is read below.
        if str(lowerCamelCase ).startswith('''mps''' ):
            lowerCamelCase__ : List[Any] =torch.manual_seed(lowerCamelCase )
        else:
            lowerCamelCase__ : Any =torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
        lowerCamelCase__ : Dict =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
        if pil_image:
            # Map [-1, 1] tensor to a uint8-style [0, 1] image and convert to PIL.
            lowerCamelCase__ : int =input_image * 0.5 + 0.5
            lowerCamelCase__ : Dict =input_image.clamp(0, 1 )
            lowerCamelCase__ : List[str] =input_image.cpu().permute(0, 2, 3, 1 ).float().numpy()
            lowerCamelCase__ : Dict =DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def snake_case ( self : List[str] )-> Optional[Any]:
        # End-to-end smoke test: run the tiny pipeline on CPU and compare a
        # 3x3 slice of the output against a hard-coded reference.
        lowerCamelCase__ : Dict ='''cpu'''  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase__ : str =self.get_dummy_components()
        lowerCamelCase__ : int =StableUnCLIPImgaImgPipeline(**lowerCamelCase )
        lowerCamelCase__ : Any =sd_pipe.to(lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
        lowerCamelCase__ : Dict =self.get_dummy_inputs(lowerCamelCase )
        inputs.update({'''image_embeds''': None} )
        lowerCamelCase__ : Any =sd_pipe(**lowerCamelCase ).images
        lowerCamelCase__ : List[Any] =image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        lowerCamelCase__ : Union[str, Any] =np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def snake_case ( self : int )-> Tuple:
        # Attention slicing must not change outputs (exact match only off-GPU).
        lowerCamelCase__ : Tuple =torch_device in ['''cpu''', '''mps''']
        self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
    def snake_case ( self : int )-> Optional[Any]:
        # Batched and single inference must agree (exact match only off-GPU).
        lowerCamelCase__ : List[Any] =torch_device in ['''cpu''', '''mps''']
        self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
    def snake_case ( self : List[str] )-> List[str]:
        # xformers memory-efficient attention must match default attention.
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow GPU integration tests for the StableUnCLIP image-to-image pipeline.

    Runs the full fp16 pipelines against reference images from the hub and
    checks output shape, mean pixel difference, and peak GPU memory.

    NOTE(review): every method here is named `snake_case` (a rename artifact),
    so later defs shadow earlier ones and only the last method actually exists
    on the class; several locals (`pipe`, `output`, `image`, `mem_bytes`) are
    read but their assignments were renamed to `lowerCamelCase__`. Confirm
    against the original diffusers test module. Code kept byte-identical.
    """
    def snake_case ( self : List[Any] )-> Dict:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def snake_case ( self : Optional[int] )-> int:
        # l-variant checkpoint: full run compared to a stored reference image.
        lowerCamelCase__ : Tuple =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        lowerCamelCase__ : Optional[int] =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
        lowerCamelCase__ : Optional[Any] =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-l-img2img''', torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase__ : int =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase__ : Any =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' )
        lowerCamelCase__ : List[Any] =output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )
    def snake_case ( self : Optional[int] )-> Tuple:
        # h-variant checkpoint: same check against its own reference image.
        lowerCamelCase__ : Any =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        lowerCamelCase__ : str =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
        lowerCamelCase__ : Optional[int] =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase__ : str =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase__ : Tuple =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' )
        lowerCamelCase__ : Tuple =output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )
    def snake_case ( self : Optional[int] )-> List[str]:
        # Memory test: with slicing + sequential offload, peak VRAM < 7 GB.
        lowerCamelCase__ : int =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        lowerCamelCase__ : Any =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
        lowerCamelCase__ : Optional[Any] =pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase__ : List[Any] =pipe(
            lowerCamelCase, '''anime turtle''', num_inference_steps=2, output_type='''np''', )
        lowerCamelCase__ : Optional[int] =torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 625 | 1 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __SCREAMING_SNAKE_CASE ( nn.Module ):
    """Flax UNet down-sampling block with cross-attention.

    Runs ``num_layers`` (ResNet -> Transformer cross-attention) pairs,
    collecting each intermediate activation for the UNet skip connections,
    and optionally finishes with a strided downsampler.

    NOTE(review): field, parameter and local names are reconstructed from the
    ``self.<name>`` reads in the method bodies — the original (mangled) file
    declared every field as ``_a`` and duplicated the ``__call__`` parameter
    names (a SyntaxError). The initializer is named ``setup`` as Flax requires;
    ``jnp.float32`` replaces the nonexistent ``jnp.floataa``.
    """

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            # First layer consumes the block input; later layers chain out -> out.
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block )
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels, dtype=self.dtype )

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True ):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions ):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic )
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class __SCREAMING_SNAKE_CASE ( nn.Module ):
    """Flax UNet down-sampling block without attention.

    Runs ``num_layers`` ResNet blocks, collecting each intermediate activation
    for the UNet skip connections, and optionally finishes with a strided
    downsampler.

    NOTE(review): field, parameter and local names are reconstructed from the
    ``self.<name>`` reads in the method bodies — the original (mangled) file
    declared every field as ``_a`` and duplicated the ``__call__`` parameter
    names (a SyntaxError). The initializer is named ``setup`` as Flax requires;
    ``jnp.float32`` replaces the nonexistent ``jnp.floataa``.
    """

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers ):
            # First layer consumes the block input; later layers chain out -> out.
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels, dtype=self.dtype )

    def __call__(self, hidden_states, temb, deterministic=True ):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class __SCREAMING_SNAKE_CASE ( nn.Module ):
    """Flax UNet up-sampling block with cross-attention.

    Each layer pops one skip-connection tensor off ``res_hidden_states_tuple``,
    concatenates it on the channel axis, then runs a ResNet block and a
    Transformer cross-attention block; optionally ends with an upsampler.

    NOTE(review): field, parameter and local names are reconstructed from the
    ``self.<name>`` / body reads (``res_hidden_states_tuple``, ``hidden_states``
    appear verbatim in the original body) — the original (mangled) file
    declared every field as ``_a`` and duplicated the ``__call__`` parameter
    names (a SyntaxError). The initializer is named ``setup`` as Flax requires;
    ``jnp.float32`` replaces the nonexistent ``jnp.floataa``.
    """

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            # Skip-connection width: the last layer receives the block's input
            # channels; earlier layers receive the block's output channels.
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block )
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype )

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True ):
        for resnet, attn in zip(self.resnets, self.attentions ):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1 )
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic )
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states )
        return hidden_states
class __SCREAMING_SNAKE_CASE ( nn.Module ):
    """Flax UNet up-sampling block without attention.

    Each layer pops one skip-connection tensor off ``res_hidden_states_tuple``,
    concatenates it on the channel axis and runs a ResNet block; optionally
    ends with an upsampler.

    NOTE(review): field, parameter and local names are reconstructed from the
    ``self.<name>`` / body reads in the original (mangled) file, which declared
    every field as ``_a`` and duplicated the ``__call__`` parameter names (a
    SyntaxError). The initializer is named ``setup`` as Flax requires;
    ``jnp.float32`` replaces the nonexistent ``jnp.floataa``.
    """

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers ):
            # Skip-connection width: the last layer receives the block's input
            # channels; earlier layers receive the block's output channels.
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype )

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True ):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1 )
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states )
        return hidden_states
class __SCREAMING_SNAKE_CASE ( nn.Module ):
    """Flax UNet middle block with cross-attention.

    Structure: one leading ResNet block, then ``num_layers`` (Transformer
    cross-attention -> ResNet) pairs, all at a constant channel width.

    NOTE(review): field, parameter and local names are reconstructed from the
    ``self.<name>`` / body reads (``attentions.append`` / ``resnets.append``
    appear verbatim in the original body) — the original (mangled) file
    declared every field as ``_a`` and duplicated the ``__call__`` parameter
    names (a SyntaxError). The initializer is named ``setup`` as Flax requires;
    ``jnp.float32`` replaces the nonexistent ``jnp.floataa``.
    """

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
        ]
        attentions = []
        for _ in range(self.num_layers ):
            attn_block = FlaxTransformeraDModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block )
            res_block = FlaxResnetBlockaD(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block )
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True ):
        hidden_states = self.resnets[0](hidden_states, temb )
        for attn, resnet in zip(self.attentions, self.resnets[1:] ):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic )
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic )
        return hidden_states
| 625 |
"""simple docstring"""
def solution(n: int = 4000000) -> int:
    """Return the sum of the even-valued Fibonacci terms not exceeding ``n``.

    Project Euler problem 2.  The obfuscated original referenced ``n``,
    ``a``, ``b`` and ``even_fibs`` without ever binding them (and appended
    and summed the limit argument instead of the Fibonacci terms), so it
    raised at runtime; the names are restored from the use sites.

    >>> solution(10)
    10
    """
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


# Backward-compatible alias for the obfuscated public name.
snake_case__ = solution

if __name__ == "__main__":
    # The original printed ``solution()`` even though no function of that
    # name was defined; both names now resolve.
    print(f'{solution() = }')
| 625 | 1 |
"""simple docstring"""
def combination_util(arr, n, r, index, data, i):
    """Recursively print every combination of size ``r`` drawn from ``arr``.

    Args:
        arr: Input pool of items.
        n: Number of usable items in ``arr``.
        r: Size of each combination.
        index: Next free slot in ``data``.
        data: Scratch buffer of length ``r`` holding the current combination.
        i: Index of the next candidate element in ``arr``.

    Parameter names are restored from the body's use sites; the obfuscated
    original declared six parameters all named ``__lowerCamelCase`` (a
    SyntaxError) and recursed into an undefined ``combination_util``.
    """
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    """Print all ``r``-combinations of the first ``n`` items of ``arr``."""
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


# Backward-compatible alias for the obfuscated public name (the second
# obfuscated ``snake_case__`` definition shadowed the first anyway).
snake_case__ = print_combination

if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 625 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    """Builds tiny BlenderbotSmall configs/inputs for the TF model tests.

    NOTE(review): class, method and parameter names are reconstructed from
    the use sites (e.g. ``self.batch_size`` reads, and the
    ``prepare_config_and_inputs_for_common`` /
    ``check_decoder_model_past_large_inputs`` calls in the test class
    below); the obfuscated original assigned every constructor argument
    to a discarded local instead of ``self`` and gave all parameters the
    same name.
    """

    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return a tiny config plus matching encoder/decoder input dict."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        # Force every sequence to end with EOS.
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Verify cached (past_key_values) decoding matches full decoding."""
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict['input_ids']

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the full model-input dict, deriving any masks not supplied.

    Parameter names are restored from the body's use sites; the obfuscated
    original declared every parameter with the same name (a SyntaxError)
    and discarded each computed mask instead of binding it, so ``None``
    masks were returned unchanged.
    """
    if attention_mask is None:
        # 1 everywhere except padding positions.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # The decoder-start token is always attended to, then mask padding.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


# Backward-compatible alias for the obfuscated public name.
snake_case__ = prepare_blenderbot_small_inputs_dict
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model and pipeline tests for TF BlenderbotSmall.

    NOTE(review): the obfuscated original named all three methods
    ``snake_case`` (so unittest would discover none of them) and assigned
    the tester/config-tester to discarded locals; names are restored from
    the use sites (``self.model_tester`` / ``self.config_tester``).
    """

    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        """Instantiate the model tester and the shared config tester."""
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    """Slow generation test against the real 90M BlenderbotSmall checkpoint.

    NOTE(review): both cached properties were named ``snake_case`` in the
    obfuscated original (the second shadowed the first); the names
    ``tokenizer``/``model`` are restored from the ``self.tokenizer`` /
    ``self.model`` reads in the test body, and the class attributes from
    ``self.src_text`` / ``self.model_name``.
    """

    src_text = [
        'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
        ' i\'m going to throw up.\nand why is that?'
    ]
    model_name = 'facebook/blenderbot_small-90M'

    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M')

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_short_input(self):
        """Beam-search generation should produce one of the known outputs."""
        model_inputs = self.tokenizer(self.src_text, return_tensors='tf')
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 625 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    class Image:
        """Minimal stand-in so ``Image.open`` references below don't raise a
        NameError when PIL is unavailable (the vision tests are skipped then).
        The obfuscated original named this stub ``__SCREAMING_SNAKE_CASE``,
        leaving ``Image`` undefined in the no-PIL branch."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    """End-to-end tests for the ``object-detection`` pipeline.

    NOTE(review): the obfuscated original named every method ``snake_case``
    (so unittest would run only the last one), declared duplicate parameter
    names, and bound intermediate results to discarded locals; names are
    restored from the use sites.
    """

    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build an ObjectDetectionPipeline plus a sample image for the mixin."""
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        """Smoke-test single-image and batched inference with a zero threshold."""
        outputs = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png', threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    'score': ANY(float),
                    'label': ANY(str),
                    'box': {'xmin': ANY(int), 'ymin': ANY(int), 'xmax': ANY(int), 'ymax': ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset('hf-internal-testing/fixtures_image_utils', 'image', split='test')

        batch = [
            Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png'),
            'http://images.cocodataset.org/val2017/000000039769.jpg',
            # RGBA
            dataset[0]['file'],
            # LA
            dataset[1]['file'],
            # L
            dataset[2]['file'],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        'score': ANY(float),
                        'label': ANY(str),
                        'box': {'xmin': ANY(int), 'ymin': ANY(int), 'xmax': ANY(int), 'ymax': ANY(int)},
                    },
                )

    @require_tf
    @unittest.skip('Object detection not implemented in TF')
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        """Tiny DETR checkpoint: exact (simplified) detections are pinned."""
        model_id = 'hf-internal-testing/tiny-detr-mobilenetsv3'

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=0.0)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                {'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
            ],
        )

        outputs = object_detector(
            [
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                'http://images.cocodataset.org/val2017/000000039769.jpg',
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                    {'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                ],
                [
                    {'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                    {'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        """Full DETR ResNet-50 checkpoint, explicit model/extractor objects."""
        model_id = 'facebook/detr-resnet-50'

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg')
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                {'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                {'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                {'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                {'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
            ],
        )

        outputs = object_detector(
            [
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                'http://images.cocodataset.org/val2017/000000039769.jpg',
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                    {'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                    {'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                    {'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                    {'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
                ],
                [
                    {'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                    {'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                    {'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                    {'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                    {'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        """Same checkpoint, constructed through the ``pipeline`` factory."""
        model_id = 'facebook/detr-resnet-50'

        object_detector = pipeline('object-detection', model=model_id)

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg')
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                {'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                {'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                {'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                {'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
            ],
        )

        outputs = object_detector(
            [
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                'http://images.cocodataset.org/val2017/000000039769.jpg',
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                    {'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                    {'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                    {'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                    {'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
                ],
                [
                    {'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                    {'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                    {'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                    {'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                    {'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_threshold(self):
        """A high threshold must filter everything but the two cats."""
        threshold = 0.9_985
        model_id = 'facebook/detr-resnet-50'

        object_detector = pipeline('object-detection', model=model_id)

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                {'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
            ],
        )

    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        """Document (LayoutLM) object detection through the pipeline factory."""
        model_id = 'Narsil/layoutlmv3-finetuned-funsd'
        threshold = 0.9_993

        object_detector = pipeline('object-detection', model=model_id, threshold=threshold)

        outputs = object_detector(
            'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png'
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {'score': 0.9_993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
                {'score': 0.9_993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
            ],
        )
| 625 |
"""simple docstring"""
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """Return True if ``next_ver`` can extend the partial Hamiltonian path."""
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Backtracking helper: try to fill ``path[curr_ind:]`` into a Hamiltonian cycle.

    Mutates ``path`` in place; function and parameter names are restored
    from the use sites (the obfuscated original recursed into an undefined
    ``combination``-style name and discarded the ``path`` writes).
    """
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def snake_case__(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Return a Hamiltonian cycle through ``graph`` starting (and ending) at
    ``start_index``, or ``[]`` when no such cycle exists."""
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []


# Conventional alias for the obfuscated public name.
hamilton_cycle = snake_case__
| 625 | 1 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
_lowercase : Tuple = True
except (ImportError, AttributeError):
_lowercase : Tuple = object
def snake_case__ ( *__lowerCamelCase : str , **__lowerCamelCase : Tuple ):
"""simple docstring"""
pass
_lowercase : Optional[Any] = False
_lowercase : List[str] = logging.get_logger("transformers-cli/serving")
def snake_case__(args: Namespace):
    """
    Factory function used to instantiate serving server from provided command line arguments.

    Returns: ServeCommand
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    # Pass the constructed pipeline; the obfuscated original passed ``args``
    # itself, which ServeCommand cannot serve.
    return ServeCommand(nlp, args.host, args.port, args.workers)


# Name the rest of the module (ServeCommand.register_subcommand) refers to.
serve_command_factory = snake_case__
class ServeModelInfoResult(BaseModel):
    """Expose model information (response model for the '/' route).

    NOTE(review): class name restored from the ``ServeModelInfoResult(infos=...)``
    constructor call in ServeCommand; field name/type from that call site.
    """

    infos: dict
class ServeTokenizeResult(BaseModel):
    """Tokenize result model (response model for '/tokenize').

    NOTE(review): class/field names restored from the
    ``ServeTokenizeResult(tokens=..., tokens_ids=...)`` call in ServeCommand.
    """

    tokens: List[str]
    tokens_ids: Optional[List[int]]
class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model (response model for '/detokenize').

    NOTE(review): class/field names restored from the
    ``ServeDeTokenizeResult(model='', text=...)`` call in ServeCommand.
    """

    text: str
class ServeForwardResult(BaseModel):
    """Forward result model (response model for '/forward').

    NOTE(review): class/field names restored from the
    ``ServeForwardResult(output=...)`` calls in ServeCommand.
    """

    output: Any
class ServeCommand(BaseTransformersCLICommand):
    """``transformers-cli serve``: expose a pipeline over a small FastAPI REST app.

    NOTE(review): the obfuscated original named every handler ``snake_case``
    (so the later defs shadowed the earlier ones and the FastAPI routes
    could not all exist); handler and response-model names are restored
    from the route table and constructor calls below.
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformer-cli.

        Args:
            parser: Root parser to register command-specific arguments.
        """
        serve_parser = parser.add_parser(
            'serve', help='CLI tool to run inference requests through REST and GraphQL endpoints.'
        )
        serve_parser.add_argument(
            '--task',
            type=str,
            choices=get_supported_tasks(),
            help='The task to run the pipeline on',
        )
        serve_parser.add_argument('--host', type=str, default='localhost', help='Interface the server will listen on.')
        serve_parser.add_argument('--port', type=int, default=8888, help='Port the serving will listen to.')
        serve_parser.add_argument('--workers', type=int, default=1, help='Number of http workers')
        serve_parser.add_argument('--model', type=str, help='Model\'s name or path to stored model.')
        serve_parser.add_argument('--config', type=str, help='Model\'s config name or path to stored model.')
        serve_parser.add_argument('--tokenizer', type=str, help='Tokenizer name to use.')
        serve_parser.add_argument(
            '--device',
            type=int,
            default=-1,
            help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)',
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers

        # NOTE(review): ``_serve_dependencies_installed`` is the flag the
        # try/except import block above is meant to set (the obfuscation
        # renamed the assignment to ``_lowercase``) — confirm upstream.
        if not _serve_dependencies_installed:
            raise RuntimeError(
                'Using serve command requires FastAPI and uvicorn. '
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                'Or install FastAPI and uvicorn separately.'
            )
        else:
            logger.info(F'''Serving model over {host}:{port}''')
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        '/',
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=['GET'],
                    ),
                    APIRoute(
                        '/tokenize',
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=['POST'],
                    ),
                    APIRoute(
                        '/detokenize',
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=['POST'],
                    ),
                    APIRoute(
                        '/forward',
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=['POST'],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        """Start uvicorn with the configured app (module-level ``run``)."""
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        """Return the underlying model's config as plain info."""
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """
        Tokenize the provided input:
        - **text_input**: String to tokenize.
        - **return_ids**: Whether to also convert tokens to their integer ids.
        """
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={'model': '', 'error': str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """
        Detokenize the provided token ids to readable text:
        - **tokens_ids**: List of token ids.
        - **skip_special_tokens**: Do not decode special tokens.
        - **cleanup_tokenization_spaces**: Remove leading/trailing/intermediate spaces.
        """
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model='', text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={'model': '', 'error': str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        """Run the pipeline on ``inputs`` and return its output."""
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {'error': str(e)})
| 625 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : List[str] = get_tests_dir("fixtures/test_sentencepiece.model")
# Restore the constant name the test methods below actually reference;
# the obfuscation collapsed all three module constants into ``_lowercase``.
SAMPLE_VOCAB = _lowercase

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

_lowercase : List[str] = 2_5_0_0_0_4
EN_CODE = _lowercase  # MBart language code id for en_XX
_lowercase : Optional[Any] = 2_5_0_0_2_0
RO_CODE = _lowercase  # MBart language code id for ro_RO
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    """Unit tests for the MBart tokenizer (slow and fast implementations).

    NOTE(review): this file has been variable-obfuscated — assignments bind
    throwaway locals such as ``lowerCamelCase__`` while the following lines
    read the original names (e.g. ``tokenizer`` at L1862's equivalent), so
    several tests would raise NameError as written. Confirm against the
    upstream ``transformers`` test source before relying on any of this.
    """
    # Mangled class attributes: upstream these are tokenizer_class,
    # rust_tokenizer_class and two boolean test flags; here all four bind `_a`.
    _a = MBartTokenizer
    _a = MBartTokenizerFast
    _a = True
    _a = True
    def snake_case ( self : Tuple )-> Union[str, Any]:
        """Build a SentencePiece-fixture tokenizer and save it into the temp dir."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCamelCase__ : Union[str, Any] =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )
        # NOTE(review): `tokenizer` is not bound above (obfuscation artifact).
        tokenizer.save_pretrained(self.tmpdirname )
    def snake_case ( self : Dict )-> Union[str, Any]:
        """Round-trip tokenize / convert_tokens_to_ids / convert_ids_to_tokens."""
        lowerCamelCase__ : Any =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )
        lowerCamelCase__ : List[Any] =tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(lowerCamelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        # Ids are fixture-piece ids shifted by the fairseq vocabulary offset.
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowerCamelCase ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        lowerCamelCase__ : str =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            lowerCamelCase, [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ], )
        lowerCamelCase__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(lowerCamelCase )
        self.assertListEqual(
            lowerCamelCase, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ], )
        # Converting back: out-of-vocab pieces ('9', 'é') decode to '<unk>'.
        lowerCamelCase__ : str =tokenizer.convert_ids_to_tokens(lowerCamelCase )
        self.assertListEqual(
            lowerCamelCase, [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ], )
    def snake_case ( self : Tuple )-> List[Any]:
        """Save/reload fast vs. slow tokenizers: default, legacy and non-legacy formats."""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        lowerCamelCase__ : int =(self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCamelCase__ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
                lowerCamelCase__ : str =self.tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
                lowerCamelCase__ : List[str] =tempfile.mkdtemp()
                lowerCamelCase__ : Union[str, Any] =tokenizer_r.save_pretrained(lowerCamelCase )
                lowerCamelCase__ : Optional[int] =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                lowerCamelCase__ : List[str] =tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Any =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : Dict =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(lowerCamelCase )
                # Save tokenizer rust, legacy_format=True
                lowerCamelCase__ : Dict =tempfile.mkdtemp()
                lowerCamelCase__ : List[str] =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
                lowerCamelCase__ : Tuple =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it save with the same files
                self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Optional[int] =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : Any =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                shutil.rmtree(lowerCamelCase )
                # Save tokenizer rust, legacy_format=False
                lowerCamelCase__ : Optional[int] =tempfile.mkdtemp()
                lowerCamelCase__ : int =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
                lowerCamelCase__ : Dict =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Dict =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : int =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                shutil.rmtree(lowerCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Integration tests for the facebook/mbart-large-en-ro checkpoint
    (English -> Romanian translation batch encoding).

    NOTE(review): obfuscated like the rest of the file — the four ``_a``
    attributes shadow each other and ``setUpClass`` binds locals instead of
    ``cls.tokenizer`` / ``cls.pad_token_id``; confirm against upstream.
    """
    # Upstream: checkpoint_name, src_text, tgt_text, expected_src_tokens.
    _a = 'facebook/mbart-large-en-ro'
    _a = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    _a = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    _a = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
    @classmethod
    def snake_case ( cls : List[Any] )-> Optional[int]:
        """One-time setup: load the pretrained tokenizer with en_XX -> ro_RO langs."""
        lowerCamelCase__ : MBartTokenizer =MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='''en_XX''', tgt_lang='''ro_RO''' )
        # NOTE(review): upstream this is `cls.pad_token_id = 1`.
        lowerCamelCase__ : Optional[int] =1
        return cls
    def snake_case ( self : Optional[Any] )-> List[str]:
        """Language-code ids match the fairseq mapping."""
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''], 25_0001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''], 25_0004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''], 25_0020 )
    def snake_case ( self : Optional[int] )-> List[Any]:
        """Encoding the first source sentence yields the expected token ids."""
        lowerCamelCase__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, lowerCamelCase )
    def snake_case ( self : Optional[Any] )-> str:
        """Decoding with skip_special_tokens drops EOS and the language code."""
        self.assertIn(lowerCamelCase, self.tokenizer.all_special_ids )
        lowerCamelCase__ : Optional[int] =[RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
        lowerCamelCase__ : Any =self.tokenizer.decode(lowerCamelCase, skip_special_tokens=lowerCamelCase )
        lowerCamelCase__ : str =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCamelCase )
        self.assertEqual(lowerCamelCase, lowerCamelCase )
        self.assertNotIn(self.tokenizer.eos_token, lowerCamelCase )
    def snake_case ( self : Tuple )-> int:
        """Truncation keeps EOS (2) followed by the source language code as suffix."""
        lowerCamelCase__ : Optional[int] =['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0], lowerCamelCase )
        lowerCamelCase__ : Dict =10
        lowerCamelCase__ : Optional[int] =self.tokenizer(lowerCamelCase, max_length=lowerCamelCase, truncation=lowerCamelCase ).input_ids[0]
        self.assertEqual(ids[-2], 2 )
        self.assertEqual(ids[-1], lowerCamelCase )
        self.assertEqual(len(lowerCamelCase ), lowerCamelCase )
    def snake_case ( self : int )-> Any:
        """<mask> and ar_AR map to their reserved ids."""
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ), [25_0026, 25_0001] )
    def snake_case ( self : Tuple )-> Optional[Any]:
        """fairseq id mapping survives a save/load round trip."""
        lowerCamelCase__ : int =tempfile.mkdtemp()
        lowerCamelCase__ : Optional[int] =self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =MBartTokenizer.from_pretrained(lowerCamelCase )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowerCamelCase )
    @require_torch
    def snake_case ( self : Optional[Any] )-> Tuple:
        """Batch layout matches the fairseq reference (see gist in comment)."""
        lowerCamelCase__ : Optional[Any] =self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, return_tensors='''pt''' )
        lowerCamelCase__ : Dict =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
    @require_torch
    def snake_case ( self : Optional[Any] )-> Any:
        """Shapes/suffix tokens of a padded+truncated seq2seq batch."""
        lowerCamelCase__ : str =self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=len(self.expected_src_tokens ), return_tensors='''pt''', )
        lowerCamelCase__ : List[Any] =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )
        self.assertIsInstance(lowerCamelCase, lowerCamelCase )
        self.assertEqual((2, 14), batch.input_ids.shape )
        self.assertEqual((2, 14), batch.attention_mask.shape )
        lowerCamelCase__ : Any =batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, lowerCamelCase )
        self.assertEqual(2, batch.decoder_input_ids[0, -1] ) # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [] )
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE] )
    def snake_case ( self : List[Any] )-> Dict:
        """Source and target can be truncated to different max lengths."""
        lowerCamelCase__ : Any =self.tokenizer(self.src_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=3, return_tensors='''pt''' )
        lowerCamelCase__ : Tuple =self.tokenizer(
            text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=10, return_tensors='''pt''' )
        lowerCamelCase__ : Union[str, Any] =targets['''input_ids''']
        lowerCamelCase__ : List[Any] =shift_tokens_right(lowerCamelCase, self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1], 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1], 10 )
    @require_torch
    def snake_case ( self : Optional[int] )-> List[Any]:
        """_build_translation_inputs appends EOS + src lang and forces the tgt BOS."""
        lowerCamelCase__ : str =self.tokenizer._build_translation_inputs(
            '''A test''', return_tensors='''pt''', src_lang='''en_XX''', tgt_lang='''ar_AR''' )
        self.assertEqual(
            nested_simplify(lowerCamelCase ), {
                # A, test, EOS, en_XX
                '''input_ids''': [[62, 3034, 2, 25_0004]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 25_0001,
            }, )
| 625 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    """Unit tests for the CTRL BPE tokenizer, built on a tiny hand-written
    vocab/merges fixture.

    NOTE(review): obfuscated file — the two ``_a`` flags shadow each other and
    ``lowerCAmelCase_`` is presumably the TokenizerTesterMixin; confirm.
    """
    _a = CTRLTokenizer
    _a = False
    _a = False
    def snake_case ( self : Optional[int] )-> List[str]:
        """Write a minimal vocab.json and merges.txt fixture into the temp dir."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowerCamelCase__ : List[str] =['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
        lowerCamelCase__ : Optional[Any] =dict(zip(lowerCamelCase, range(len(lowerCamelCase ) ) ) )
        lowerCamelCase__ : Optional[int] =['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
        lowerCamelCase__ : Dict ={'''unk_token''': '''<unk>'''}
        lowerCamelCase__ : Any =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
        lowerCamelCase__ : List[Any] =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(lowerCamelCase ) + '''\n''' )
        with open(self.merges_file, '''w''', encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(lowerCamelCase ) )
    def snake_case ( self : List[str], **lowerCamelCase : Optional[Any] )-> Dict:
        """Instantiate a CTRLTokenizer from the fixture dir with extra kwargs."""
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase )
    def snake_case ( self : List[Any], lowerCamelCase : List[str] )-> Union[str, Any]:
        """Provide the (input, expected output) pair used by the mixin."""
        lowerCamelCase__ : List[str] ='''adapt react readapt apt'''
        lowerCamelCase__ : int ='''adapt react readapt apt'''
        return input_text, output_text
    def snake_case ( self : int )-> Tuple:
        """BPE-tokenize a sentence and map tokens (plus <unk>) to ids."""
        lowerCamelCase__ : List[str] =CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
        lowerCamelCase__ : List[Any] ='''adapt react readapt apt'''
        lowerCamelCase__ : Union[str, Any] ='''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
        lowerCamelCase__ : Optional[int] =tokenizer.tokenize(lowerCamelCase )
        self.assertListEqual(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : List[str] =tokens + [tokenizer.unk_token]
        lowerCamelCase__ : List[str] =[0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ), lowerCamelCase )
| 625 |
"""simple docstring"""
def snake_case__ ( sentence: str ) -> str:
    """Return *sentence* with every word longer than four characters reversed.

    Words of length four or less are kept as-is; whitespace is normalized to
    single spaces by split/join.

    Fixes two defects in the previous version: the body read an unbound name
    ``sentence`` (the parameter was obfuscated away) and the length test used
    the whole sentence's length instead of each word's.

    >>> snake_case__("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    # word[::-1] reverses the word; the condition applies per word.
    return " ".join(word[::-1] if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fixed: the function defined in this file is named `snake_case__`;
    # the previous call to the undefined `reverse_long_words` raised NameError.
    print(snake_case__("Hey wollef sroirraw"))
| 625 | 1 |
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def snake_case__ ( arrival_time: list[int] , burst_time: list[int] , no_of_processes: int ) -> list[int]:
    """Compute per-process waiting times under preemptive SJF (SRTF).

    Simulates one time unit at a time, always running the process with the
    smallest remaining burst among those that have arrived.

    Args:
        arrival_time: arrival time of each process.
        burst_time: total CPU burst of each process.
        no_of_processes: number of processes (len of the two lists).

    Returns:
        List of waiting times, index-aligned with the inputs.

    Fixes two defects in the previous version: the ``def`` repeated the same
    obfuscated parameter name three times (a SyntaxError), and the waiting-time
    line read an unbound name ``finar``.
    """
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999999999  # sentinel "infinite" remaining time
    short = 0         # index of the currently shortest-remaining process
    check = False     # True while some runnable process has been selected
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            # No process has arrived yet: idle one time unit.
            increment_time += 1
            continue
        # Run the selected process for one time unit.
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # waiting = turnaround - burst = (finish - arrival) - burst
            waiting_time[short] = finish_time - arrival_time[short] - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def snake_case__ ( burst_time: list[int] , no_of_processes: int , waiting_time: list[int] ) -> list[int]:
    """Compute per-process turnaround times: turnaround = burst + waiting.

    Fixes the previous version, whose ``def`` repeated one obfuscated
    parameter name three times (a SyntaxError) while the body read the
    original names. Parameter order matches the script's call site
    ``(bt, n, wt)``.
    """
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def snake_case__ ( waiting_time: list[int] , turn_around_time: list[int] , no_of_processes: int ) -> None:
    """Print the average waiting and turnaround times.

    Output strings are kept byte-identical to the original. Fixes the
    previous version's duplicated obfuscated parameter names (a SyntaxError).
    """
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''' )
    print('''Average turn around time =''' , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
    # Interactive driver: read arrival/burst times, run SRTF, print a summary
    # table with pandas.
    # NOTE(review): this script is broken as written by the obfuscation pass:
    # `calculate_waitingtime` / `calculate_turnaroundtime` /
    # `calculate_average_times` are undefined (the functions above are all
    # named `snake_case__` and shadow each other), the tuple-target annotated
    # assignment below is a SyntaxError, and `fcfs` is never bound (the
    # DataFrame is assigned to a throwaway local). Confirm against upstream.
    print("Enter how many process you want to analyze")
    _lowercase : Tuple = int(input())
    _lowercase : str = [0] * no_of_processes
    _lowercase : Dict = [0] * no_of_processes
    _lowercase : Any = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        # NOTE(review): `a, b : Dict = ...` is invalid syntax (annotated
        # assignment cannot have a tuple target).
        _lowercase , _lowercase : Dict = map(int, input().split())
    _lowercase : Optional[Any] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    _lowercase : Optional[int] = burst_time
    _lowercase : str = no_of_processes
    _lowercase : Dict = waiting_time
    _lowercase : Optional[Any] = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    _lowercase : Union[str, Any] = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )
    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
| 625 |
"""simple docstring"""
def snake_case__ ( max_base: int = 10 , max_power: int = 22 ) -> int:
    """Project Euler 63: count n-digit positive integers that are an nth power.

    Checks every base in [1, max_base) raised to every power in
    [1, max_power) and counts results whose decimal length equals the power.
    With the defaults this returns 49.

    Fixes the previous version, whose ``def`` repeated one obfuscated
    parameter name twice (a SyntaxError) and whose body read unbound
    names ``bases`` / ``powers``.
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
    # Fixed: the solver in this file is named `snake_case__`; the previous
    # f-string called the undefined name `solution`, raising NameError.
    print(f'{snake_case__(1_0, 2_2) = }')
| 625 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    """Processor combining a SpeechT5 feature extractor and tokenizer.

    Routes audio inputs/targets to the feature extractor and text
    inputs/targets to the tokenizer, merging target tensors into the main
    encoding as ``labels`` / ``decoder_attention_mask``.
    """
    # Auto-class names used by ProcessorMixin to build the sub-components.
    _a = 'SpeechT5FeatureExtractor'
    _a = 'SpeechT5Tokenizer'
    def __init__( self : Optional[int], lowerCamelCase : str, lowerCamelCase : Any )-> List[Any]:
        # (feature_extractor, tokenizer) — stored by ProcessorMixin.
        super().__init__(lowerCamelCase, lowerCamelCase )
    def __call__( self : Optional[int], *lowerCamelCase : Any, **lowerCamelCase : Optional[Any] )-> Dict:
        """Dispatch audio/text inputs and targets; merge targets as labels.

        Exactly one of (audio, text) and at most one of
        (audio_target, text_target) may be given; audio goes through the
        feature extractor, text through the tokenizer.
        """
        # Pop routing kwargs first so the remainder passes through untouched.
        lowerCamelCase__ : Union[str, Any] =kwargs.pop('''audio''', lowerCamelCase )
        lowerCamelCase__ : List[Any] =kwargs.pop('''text''', lowerCamelCase )
        lowerCamelCase__ : int =kwargs.pop('''text_target''', lowerCamelCase )
        lowerCamelCase__ : str =kwargs.pop('''audio_target''', lowerCamelCase )
        lowerCamelCase__ : Optional[int] =kwargs.pop('''sampling_rate''', lowerCamelCase )
        if audio is not None and text is not None:
            raise ValueError(
                '''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''' )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                '''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''' )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                '''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''' )
        # Main inputs: audio -> feature extractor, text -> tokenizer.
        if audio is not None:
            lowerCamelCase__ : str =self.feature_extractor(lowerCamelCase, *lowerCamelCase, sampling_rate=lowerCamelCase, **lowerCamelCase )
        elif text is not None:
            lowerCamelCase__ : Union[str, Any] =self.tokenizer(lowerCamelCase, **lowerCamelCase )
        else:
            lowerCamelCase__ : Dict =None
        # Targets: extracted separately, then folded into `inputs` as labels.
        if audio_target is not None:
            lowerCamelCase__ : Dict =self.feature_extractor(audio_target=lowerCamelCase, *lowerCamelCase, sampling_rate=lowerCamelCase, **lowerCamelCase )
            lowerCamelCase__ : Optional[int] =targets['''input_values''']
        elif text_target is not None:
            lowerCamelCase__ : str =self.tokenizer(lowerCamelCase, **lowerCamelCase )
            lowerCamelCase__ : List[str] =targets['''input_ids''']
        else:
            lowerCamelCase__ : List[str] =None
        if inputs is None:
            return targets
        if targets is not None:
            lowerCamelCase__ : Any =labels
            lowerCamelCase__ : Union[str, Any] =targets.get('''attention_mask''' )
            if decoder_attention_mask is not None:
                lowerCamelCase__ : List[Any] =decoder_attention_mask
        return inputs
    def snake_case ( self : Optional[int], *lowerCamelCase : Dict, **lowerCamelCase : List[str] )-> int:
        """Pad input_values / input_ids / labels, mirroring __call__'s routing.

        Label padding picks the tokenizer when the labels look like token-id
        encodings, otherwise the feature extractor with a temporary
        feature-size override (the mel-bin count).
        """
        lowerCamelCase__ : Union[str, Any] =kwargs.pop('''input_values''', lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] =kwargs.pop('''input_ids''', lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] =kwargs.pop('''labels''', lowerCamelCase )
        if input_values is not None and input_ids is not None:
            raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''' )
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                '''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''' )
        if input_values is not None:
            lowerCamelCase__ : Optional[int] =self.feature_extractor.pad(lowerCamelCase, *lowerCamelCase, **lowerCamelCase )
        elif input_ids is not None:
            lowerCamelCase__ : Optional[int] =self.tokenizer.pad(lowerCamelCase, **lowerCamelCase )
        else:
            lowerCamelCase__ : Union[str, Any] =None
        if labels is not None:
            if "input_ids" in labels or (isinstance(lowerCamelCase, lowerCamelCase ) and "input_ids" in labels[0]):
                # Token-id labels: pad with the tokenizer.
                lowerCamelCase__ : Any =self.tokenizer.pad(lowerCamelCase, **lowerCamelCase )
                lowerCamelCase__ : Optional[Any] =targets['''input_ids''']
            else:
                # Spectrogram labels: temporarily treat num_mel_bins as the
                # feature size so padding has the right width, then restore.
                lowerCamelCase__ : List[str] =self.feature_extractor.feature_size
                lowerCamelCase__ : Tuple =self.feature_extractor.num_mel_bins
                lowerCamelCase__ : Any =self.feature_extractor.pad(lowerCamelCase, *lowerCamelCase, **lowerCamelCase )
                lowerCamelCase__ : List[Any] =feature_size_hack
                lowerCamelCase__ : Optional[int] =targets['''input_values''']
        else:
            lowerCamelCase__ : Union[str, Any] =None
        if inputs is None:
            return targets
        if targets is not None:
            lowerCamelCase__ : Dict =labels
            lowerCamelCase__ : List[Any] =targets.get('''attention_mask''' )
            if decoder_attention_mask is not None:
                lowerCamelCase__ : List[str] =decoder_attention_mask
        return inputs
    def snake_case ( self : int, *lowerCamelCase : str, **lowerCamelCase : Tuple )-> List[str]:
        """Forward batch_decode to the tokenizer."""
        return self.tokenizer.batch_decode(*lowerCamelCase, **lowerCamelCase )
    def snake_case ( self : Dict, *lowerCamelCase : str, **lowerCamelCase : Dict )-> Optional[Any]:
        """Forward decode to the tokenizer."""
        return self.tokenizer.decode(*lowerCamelCase, **lowerCamelCase )
| 625 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def snake_case__ ( __lowerCamelCase : List[Any] ):
    """Return the argument unchanged if it is already iterable; otherwise
    duplicate the scalar into a 2-tuple (used to normalize image/patch sizes).

    Fixes the previous version, which returned the unbound name ``x``
    (a NameError) instead of the parameter.
    """
    if isinstance(__lowerCamelCase , collections.abc.Iterable ):
        return __lowerCamelCase
    return (__lowerCamelCase, __lowerCamelCase)
@require_flax
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def snake_case ( self : Dict, lowerCamelCase : List[str], lowerCamelCase : Any )-> Union[str, Any]:
pass
def snake_case ( self : List[str] )-> List[str]:
pass
def snake_case ( self : Optional[Any] )-> str:
pass
def snake_case ( self : Union[str, Any], lowerCamelCase : np.ndarray, lowerCamelCase : np.ndarray, lowerCamelCase : float )-> Dict:
lowerCamelCase__ : Union[str, Any] =np.abs((a - b) ).max()
self.assertLessEqual(lowerCamelCase, lowerCamelCase, F'''Difference between torch and flax is {diff} (>= {tol}).''' )
def snake_case ( self : Dict, lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : List[str], lowerCamelCase : Dict, lowerCamelCase : Any=None, **lowerCamelCase : str )-> int:
lowerCamelCase__ : List[str] =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : Dict =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], config.projection_dim) )
def snake_case ( self : Any, lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : str=None, **lowerCamelCase : List[Any] )-> int:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Union[str, Any] ={'''vision_model''': vision_model, '''text_model''': text_model}
lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], model.config.projection_dim) )
def snake_case ( self : Any, lowerCamelCase : Dict, lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict=None, **lowerCamelCase : int )-> List[str]:
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Optional[int] ={'''vision_model''': vision_model, '''text_model''': text_model}
lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
lowerCamelCase__ : List[Any] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
lowerCamelCase__ : int =output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase )
lowerCamelCase__ : Dict =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
lowerCamelCase__ : List[str] =after_output[0]
lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase, 1E-3 )
def snake_case ( self : Optional[Any], lowerCamelCase : Dict, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : List[Any]=None, **lowerCamelCase : List[Any] )-> Tuple:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Any ={'''vision_model''': vision_model, '''text_model''': text_model}
lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
lowerCamelCase__ : List[str] =model(
input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase, output_attentions=lowerCamelCase )
lowerCamelCase__ : int =output.vision_model_output.attentions
self.assertEqual(len(lowerCamelCase ), vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase__ : Tuple =to_atuple(vision_model.config.image_size )
lowerCamelCase__ : Optional[Any] =to_atuple(vision_model.config.patch_size )
lowerCamelCase__ : Union[str, Any] =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCamelCase__ : int =num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCamelCase__ : List[Any] =output.text_model_output.attentions
self.assertEqual(len(lowerCamelCase ), text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
def snake_case ( self : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : Any, lowerCamelCase : Union[str, Any] )-> Any:
pt_model.to(lowerCamelCase )
pt_model.eval()
# prepare inputs
lowerCamelCase__ : Any =inputs_dict
lowerCamelCase__ : Any ={k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
lowerCamelCase__ : List[str] =pt_model(**lowerCamelCase ).to_tuple()
lowerCamelCase__ : Optional[Any] =fx_model(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase )
lowerCamelCase__ : Optional[int] =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase )
lowerCamelCase__ : List[Any] =fx_model_loaded(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase )
lowerCamelCase__ : str =VisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase )
pt_model_loaded.to(lowerCamelCase )
pt_model_loaded.eval()
with torch.no_grad():
lowerCamelCase__ : List[Any] =pt_model_loaded(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCamelCase, pt_output_loaded.numpy(), 4E-2 )
def snake_case ( self : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[Any], lowerCamelCase : str )-> List[Any]:
lowerCamelCase__ : Any =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : List[Any] =VisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : str =convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase )
lowerCamelCase__ : Tuple =fx_state
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def snake_case ( self : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any] )-> Optional[int]:
lowerCamelCase__ : Dict =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Tuple =VisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : Tuple =load_flax_weights_in_pytorch_model(lowerCamelCase, fx_model.params )
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def snake_case ( self : Optional[int] )-> Union[str, Any]:
lowerCamelCase__ : Any =self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCamelCase )
def snake_case ( self : Tuple )-> int:
lowerCamelCase__ : int =self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase )
def snake_case ( self : Tuple )-> Any:
lowerCamelCase__ : Tuple =self.prepare_config_and_inputs()
self.check_save_load(**lowerCamelCase )
def snake_case ( self : str )-> Any:
lowerCamelCase__ : str =self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCamelCase )
@is_pt_flax_cross_test
def snake_case ( self : Tuple )-> List[Any]:
lowerCamelCase__ : Union[str, Any] =self.prepare_config_and_inputs()
lowerCamelCase__ : Union[str, Any] =config_inputs_dict.pop('''vision_config''' )
lowerCamelCase__ : Optional[Any] =config_inputs_dict.pop('''text_config''' )
lowerCamelCase__ : Tuple =config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCamelCase, lowerCamelCase, lowerCamelCase )
self.check_equivalence_flax_to_pt(lowerCamelCase, lowerCamelCase, lowerCamelCase )
@slow
def snake_case ( self : Optional[Any] )-> Tuple:
    """Save/reload round-trip: outputs before and after must match to 1e-5.

    Fixes (mangled original): every local was overwritten into one name and
    the final check compared an output against itself (``out_a - out_a``,
    always 0); compare pre-save vs post-reload outputs instead.
    """
    model, inputs = self.get_pretrained_model_and_inputs()
    outputs = model(**inputs)
    out_before = outputs[0]
    with tempfile.TemporaryDirectory() as tmp_dirname:
        model.save_pretrained(tmp_dirname)
        reloaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
        after_outputs = reloaded(**inputs)
        out_after = after_outputs[0]
        max_diff = np.amax(np.abs(out_after - out_before))
        self.assertLessEqual(max_diff, 1E-5)
@require_flax
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    '''Dual-encoder test specialization pairing a Flax ViT vision tower with a
    Flax BERT text tower (tiny random hub checkpoints).

    NOTE(review): locals in this file look machine-mangled — every local is
    named `lowerCamelCase__`, so later references such as `model`, `batch_size`
    or `vision_config_and_inputs` are undefined as written. Comments below
    describe the apparent original intent; confirm against upstream.
    '''

    def snake_case ( self : Optional[int] )-> Optional[Any]:
        # Build a tiny ViT+BERT dual encoder from pretrained towers and
        # matching random inputs (presumably returns (model, inputs)).
        lowerCamelCase__ : str =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-vit''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
        lowerCamelCase__ : Union[str, Any] =13
        lowerCamelCase__ : List[str] =floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        lowerCamelCase__ : List[str] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
        lowerCamelCase__ : Optional[int] =random_attention_mask([batch_size, 4] )
        lowerCamelCase__ : Any ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return model, inputs

    def snake_case ( self : str, lowerCamelCase : str, lowerCamelCase : int )-> int:
        # Instantiate the two towers from their configs.
        # NOTE(review): the two parameters share one mangled name (SyntaxError
        # as written); presumably (vision_config, text_config) originally.
        lowerCamelCase__ : str =FlaxViTModel(lowerCamelCase )
        lowerCamelCase__ : Any =FlaxBertModel(lowerCamelCase )
        return vision_model, text_model

    def snake_case ( self : int )-> Optional[int]:
        # Merge the ViT and BERT testers' fixtures into one kwargs dict.
        lowerCamelCase__ : Any =FlaxViTModelTester(self )
        lowerCamelCase__ : Union[str, Any] =FlaxBertModelTester(self )
        lowerCamelCase__ : Any =vit_model_tester.prepare_config_and_inputs()
        lowerCamelCase__ : Optional[Any] =bert_model_tester.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ : Any =vision_config_and_inputs
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    '''Dual-encoder test specialization pairing a Flax CLIP vision tower with
    a Flax BERT text tower (tiny random hub checkpoints).

    NOTE(review): locals look machine-mangled (all `lowerCamelCase__`), so
    names such as `model`, `batch_size` or `clip_model_tester` below are
    undefined as written; comments describe the apparent intent only.
    '''

    def snake_case ( self : Optional[int] )-> Optional[int]:
        # Build a tiny CLIP+BERT dual encoder and matching random inputs.
        lowerCamelCase__ : Union[str, Any] =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-clip''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
        lowerCamelCase__ : Union[str, Any] =13
        lowerCamelCase__ : Optional[Any] =floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        lowerCamelCase__ : Union[str, Any] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
        lowerCamelCase__ : str =random_attention_mask([batch_size, 4] )
        lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return model, inputs

    def snake_case ( self : List[str], lowerCamelCase : Any, lowerCamelCase : Dict )-> Dict:
        # Instantiate the two towers from their configs.
        # NOTE(review): the two parameters share one mangled name (SyntaxError
        # as written); presumably (vision_config, text_config) originally.
        lowerCamelCase__ : str =FlaxCLIPVisionModel(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =FlaxBertModel(lowerCamelCase )
        return vision_model, text_model

    def snake_case ( self : Optional[int] )-> Optional[Any]:
        # Merge the CLIP and BERT testers' fixtures into one kwargs dict.
        lowerCamelCase__ : List[Any] =FlaxCLIPVisionModelTester(self )
        lowerCamelCase__ : List[Any] =FlaxBertModelTester(self )
        lowerCamelCase__ : Any =clip_model_tester.prepare_config_and_inputs()
        lowerCamelCase__ : Optional[int] =bert_model_tester.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ : List[Any] =vision_config_and_inputs
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Slow integration test: run the public clip-italian checkpoint on a COCO
    fixture image and two Italian prompts, then check logit shapes and values.

    NOTE(review): locals look machine-mangled (all `lowerCamelCase__`), so
    names such as `processor`, `inputs` and `outputs` are undefined as
    written; comments describe the apparent intent only.
    '''

    @slow
    def snake_case ( self : Tuple )-> Optional[Any]:
        # Load the public model + processor pair from the hub.
        lowerCamelCase__ : Any =FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''', logit_scale_init_value=1.0 )
        lowerCamelCase__ : List[Any] =VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
        lowerCamelCase__ : Optional[int] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowerCamelCase__ : Dict =processor(
            text=['''una foto di un gatto''', '''una foto di un cane'''], images=lowerCamelCase, padding=lowerCamelCase, return_tensors='''np''' )
        lowerCamelCase__ : List[Any] =model(**lowerCamelCase )
        # verify the logits: one row per image, one column per text prompt
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
        # regression values — presumably captured from a known-good run
        lowerCamelCase__ : Any =np.array([[1.2_284_727, 0.3_104_122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image, lowerCamelCase, atol=1E-3 ) )
| 625 | 1 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    '''Processor that wraps an Encodec feature extractor and a T5 tokenizer
    behind one `__call__`/`batch_decode` interface: text -> token ids, audio ->
    padded feature arrays, decoding -> per-sample unpadded audio.

    NOTE(review): locals look machine-mangled (all `lowerCamelCase__`), so
    names such as `args`, `audio`, `inputs` or `seq_len` in the bodies below
    are undefined as written; comments describe the apparent intent.
    '''
    # Component class names resolved by ProcessorMixin.
    _a = 'EncodecFeatureExtractor'
    _a = ('T5Tokenizer', 'T5TokenizerFast')

    def __init__( self : str, lowerCamelCase : str, lowerCamelCase : List[Any] )-> List[str]:
        # (feature_extractor, tokenizer) — stored by the mixin; the current
        # processor defaults to the feature extractor.
        super().__init__(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : int =self.feature_extractor
        lowerCamelCase__ : str =False

    def snake_case ( self : Any, lowerCamelCase : Tuple=None, lowerCamelCase : str=None, lowerCamelCase : int=True )-> Optional[int]:
        # Delegate decoder-prompt construction to the tokenizer
        # (task / language / no_timestamps pass-through).
        return self.tokenizer.get_decoder_prompt_ids(task=lowerCamelCase, language=lowerCamelCase, no_timestamps=lowerCamelCase )

    def __call__( self : Dict, *lowerCamelCase : int, **lowerCamelCase : Any )-> Any:
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*lowerCamelCase, **lowerCamelCase )
        # Pull the named inputs out of kwargs; remaining kwargs are forwarded.
        lowerCamelCase__ : str =kwargs.pop('''audio''', lowerCamelCase )
        lowerCamelCase__ : str =kwargs.pop('''sampling_rate''', lowerCamelCase )
        lowerCamelCase__ : Any =kwargs.pop('''text''', lowerCamelCase )
        # Positional fallback: first positional arg is the audio.
        if len(lowerCamelCase ) > 0:
            lowerCamelCase__ : str =args[0]
            lowerCamelCase__ : Dict =args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if text is not None:
            lowerCamelCase__ : Union[str, Any] =self.tokenizer(lowerCamelCase, **lowerCamelCase )
        if audio is not None:
            lowerCamelCase__ : str =self.feature_extractor(lowerCamelCase, *lowerCamelCase, sampling_rate=lowerCamelCase, **lowerCamelCase )
        # Return whichever side was requested; when both are present, merge
        # the audio features (and padding mask) into the tokenizer output.
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            lowerCamelCase__ : Union[str, Any] =audio_inputs['''input_values''']
            if "padding_mask" in audio_inputs:
                lowerCamelCase__ : str =audio_inputs['''padding_mask''']
            return inputs

    def snake_case ( self : Union[str, Any], *lowerCamelCase : Dict, **lowerCamelCase : Union[str, Any] )-> List[str]:
        # batch_decode: audio arrays go through _decode_audio, everything else
        # is delegated to the tokenizer.
        lowerCamelCase__ : int =kwargs.pop('''audio''', lowerCamelCase )
        lowerCamelCase__ : Optional[int] =kwargs.pop('''padding_mask''', lowerCamelCase )
        if len(lowerCamelCase ) > 0:
            lowerCamelCase__ : Tuple =args[0]
            lowerCamelCase__ : List[Any] =args[1:]
        if audio_values is not None:
            return self._decode_audio(lowerCamelCase, padding_mask=lowerCamelCase )
        else:
            return self.tokenizer.batch_decode(*lowerCamelCase, **lowerCamelCase )

    def snake_case ( self : Tuple, *lowerCamelCase : Optional[int], **lowerCamelCase : Optional[int] )-> List[str]:
        # Plain text decode pass-through.
        return self.tokenizer.decode(*lowerCamelCase, **lowerCamelCase )

    def snake_case ( self : str, lowerCamelCase : Optional[Any], lowerCamelCase : Optional[np.ndarray] = None )-> List[np.ndarray]:
        # _decode_audio: strip padding from each generated waveform using the
        # padding mask; returns a list of per-sample arrays.
        lowerCamelCase__ : Optional[int] =to_numpy(lowerCamelCase )
        # presumably (batch, channels, seq_len) — TODO confirm
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =audio_values.shape
        if padding_mask is None:
            return list(lowerCamelCase )
        lowerCamelCase__ : Tuple =to_numpy(lowerCamelCase )
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        lowerCamelCase__ : Optional[Any] =seq_len - padding_mask.shape[-1]
        lowerCamelCase__ : Any =1 - self.feature_extractor.padding_value
        lowerCamelCase__ : Optional[Any] =np.pad(lowerCamelCase, ((0, 0), (0, difference)), '''constant''', constant_values=lowerCamelCase )
        lowerCamelCase__ : str =audio_values.tolist()
        for i in range(lowerCamelCase ):
            # Keep only positions not flagged as padding, then restore the
            # channel dimension.
            lowerCamelCase__ : Optional[Any] =np.asarray(audio_values[i] )[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            lowerCamelCase__ : Optional[int] =sliced_audio.reshape(lowerCamelCase, -1 )
        return audio_values
| 625 |
"""simple docstring"""
def snake_case__ ( weights : list , values : list , number_of_items : int , max_weight : int , index : int ):
    """Solve the 0/1 knapsack problem by plain (exponential) recursion.

    Fixes (mangled original): all five parameters were declared with the same
    name ``__lowerCamelCase`` (a SyntaxError), leaving the body's names
    (``weights`` etc.) unbound, and the recursion went through the undefined
    name ``knapsack``. Positional order is unchanged.

    Args:
        weights: item weights.
        values: item values, parallel to ``weights``.
        number_of_items: number of items to consider.
        max_weight: remaining weight capacity.
        index: index of the item currently being decided.

    Returns:
        Maximum total value achievable from items ``index .. number_of_items-1``
        within ``max_weight``.
    """
    if index == number_of_items:
        return 0
    # Option 1: skip the current item.
    best_without = snake_case__(weights, values, number_of_items, max_weight, index + 1)
    best_with = 0
    # Option 2: take the current item, if it still fits.
    if weights[index] <= max_weight:
        best_with = values[index] + snake_case__(
            weights, values, number_of_items, max_weight - weights[index], index + 1)
    return max(best_without, best_with)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 625 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
# Pin all torch/cuDNN nondeterminism so the pixel-level assertions below are reproducible.
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    '''Fast unit tests for CycleDiffusionPipeline built from tiny,
    deterministically-seeded components (UNet + DDIM + VAE + CLIP text model).

    NOTE(review): locals look machine-mangled (all `lowerCamelCase__`), so
    names such as `components`, `pipe` or `image_slice` in the bodies below
    are undefined as written; comments describe the apparent intent only.
    '''
    # Pipeline class and parameter sets consumed by the shared test mixins.
    _a = CycleDiffusionPipeline
    _a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        'negative_prompt',
        'height',
        'width',
        'negative_prompt_embeds',
    }
    _a = PipelineTesterMixin.required_optional_params - {'latents'}
    _a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
    _a = IMAGE_TO_IMAGE_IMAGE_PARAMS
    _a = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def snake_case ( self : Dict )-> Union[str, Any]:
        # Build tiny components with fixed seeds so tests are deterministic.
        torch.manual_seed(0 )
        lowerCamelCase__ : Any =UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
        lowerCamelCase__ : Tuple =DDIMScheduler(
            beta_start=0.00_085, beta_end=0.012, beta_schedule='''scaled_linear''', num_train_timesteps=1000, clip_sample=lowerCamelCase, set_alpha_to_one=lowerCamelCase, )
        torch.manual_seed(0 )
        lowerCamelCase__ : Any =AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
        torch.manual_seed(0 )
        lowerCamelCase__ : List[str] =CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        lowerCamelCase__ : Optional[Any] =CLIPTextModel(lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        lowerCamelCase__ : List[Any] ={
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def snake_case ( self : str, lowerCamelCase : Dict, lowerCamelCase : Optional[Any]=0 )-> str:
        # Build the standard dummy call kwargs; the generator depends on the
        # device (mps needs a CPU-seeded generator).
        lowerCamelCase__ : Dict =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =image / 2 + 0.5
        if str(lowerCamelCase ).startswith('''mps''' ):
            lowerCamelCase__ : int =torch.manual_seed(lowerCamelCase )
        else:
            lowerCamelCase__ : Dict =torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
        lowerCamelCase__ : int ={
            '''prompt''': '''An astronaut riding an elephant''',
            '''source_prompt''': '''An astronaut riding a horse''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''eta''': 0.1,
            '''strength''': 0.8,
            '''guidance_scale''': 3,
            '''source_guidance_scale''': 1,
            '''output_type''': '''numpy''',
        }
        return inputs

    def snake_case ( self : Any )-> Optional[Any]:
        # End-to-end CPU run; compare a corner slice against regression values.
        lowerCamelCase__ : Union[str, Any] ='''cpu'''  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase__ : Any =self.get_dummy_components()
        lowerCamelCase__ : Any =CycleDiffusionPipeline(**lowerCamelCase )
        lowerCamelCase__ : int =pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =self.get_dummy_inputs(lowerCamelCase )
        lowerCamelCase__ : int =pipe(**lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] =output.images
        lowerCamelCase__ : Dict =images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        lowerCamelCase__ : List[str] =np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    @unittest.skipIf(torch_device != '''cuda''', '''This test requires a GPU''' )
    def snake_case ( self : str )-> Dict:
        # Same run as above but with all components cast to fp16 on GPU.
        lowerCamelCase__ : int =self.get_dummy_components()
        for name, module in components.items():
            if hasattr(lowerCamelCase, '''half''' ):
                lowerCamelCase__ : List[str] =module.half()
        lowerCamelCase__ : Dict =CycleDiffusionPipeline(**lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] =self.get_dummy_inputs(lowerCamelCase )
        lowerCamelCase__ : Any =pipe(**lowerCamelCase )
        lowerCamelCase__ : Tuple =output.images
        lowerCamelCase__ : Union[str, Any] =images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        lowerCamelCase__ : List[Any] =np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    # The remaining tests defer to the shared mixin implementations; mps is
    # skipped where the pipeline is known to be unsupported/non-deterministic.
    @skip_mps
    def snake_case ( self : Optional[Any] )-> Union[str, Any]:
        return super().test_save_load_local()

    @unittest.skip('''non-deterministic pipeline''' )
    def snake_case ( self : Optional[int] )-> Tuple:
        return super().test_inference_batch_single_identical()

    @skip_mps
    def snake_case ( self : Union[str, Any] )-> List[Any]:
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def snake_case ( self : List[Any] )-> Optional[int]:
        return super().test_save_load_optional_components()

    @skip_mps
    def snake_case ( self : Any )-> Union[str, Any]:
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Slow GPU integration tests: run CycleDiffusion on SD v1.4 against
    reference images hosted on the hub, in fp16 and fp32.

    NOTE(review): locals look machine-mangled (all `lowerCamelCase__`), so
    names such as `pipe`, `init_image` or `expected_image` below are
    undefined as written; comments describe the apparent intent only.
    '''

    def snake_case ( self : Optional[Any] )-> str:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case ( self : Tuple )-> Union[str, Any]:
        # fp16 run: black car -> blue car, compared against a stored fp16
        # reference with a loose tolerance.
        lowerCamelCase__ : List[Any] =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/cycle-diffusion/black_colored_car.png''' )
        lowerCamelCase__ : List[str] =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
        lowerCamelCase__ : Dict =init_image.resize((512, 512) )
        lowerCamelCase__ : Dict ='''CompVis/stable-diffusion-v1-4'''
        lowerCamelCase__ : Union[str, Any] =DDIMScheduler.from_pretrained(lowerCamelCase, subfolder='''scheduler''' )
        lowerCamelCase__ : str =CycleDiffusionPipeline.from_pretrained(
            lowerCamelCase, scheduler=lowerCamelCase, safety_checker=lowerCamelCase, torch_dtype=torch.floataa, revision='''fp16''' )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        pipe.enable_attention_slicing()
        lowerCamelCase__ : Tuple ='''A black colored car'''
        lowerCamelCase__ : List[str] ='''A blue colored car'''
        lowerCamelCase__ : List[Any] =torch.manual_seed(0 )
        lowerCamelCase__ : Union[str, Any] =pipe(
            prompt=lowerCamelCase, source_prompt=lowerCamelCase, image=lowerCamelCase, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=lowerCamelCase, output_type='''np''', )
        lowerCamelCase__ : List[str] =output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image ).max() < 5E-1

    def snake_case ( self : Optional[int] )-> Tuple:
        # fp32 run against the full-precision reference with a tighter bound.
        lowerCamelCase__ : List[Any] =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/cycle-diffusion/black_colored_car.png''' )
        lowerCamelCase__ : Tuple =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
        lowerCamelCase__ : Tuple =init_image.resize((512, 512) )
        lowerCamelCase__ : int ='''CompVis/stable-diffusion-v1-4'''
        lowerCamelCase__ : Union[str, Any] =DDIMScheduler.from_pretrained(lowerCamelCase, subfolder='''scheduler''' )
        lowerCamelCase__ : str =CycleDiffusionPipeline.from_pretrained(lowerCamelCase, scheduler=lowerCamelCase, safety_checker=lowerCamelCase )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        pipe.enable_attention_slicing()
        lowerCamelCase__ : Any ='''A black colored car'''
        lowerCamelCase__ : int ='''A blue colored car'''
        lowerCamelCase__ : int =torch.manual_seed(0 )
        lowerCamelCase__ : Any =pipe(
            prompt=lowerCamelCase, source_prompt=lowerCamelCase, image=lowerCamelCase, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=lowerCamelCase, output_type='''np''', )
        lowerCamelCase__ : List[str] =output.images
        assert np.abs(image - expected_image ).max() < 2E-2
| 625 |
"""simple docstring"""
# Pinned dependency version specifiers, keyed by bare package name.
# NOTE(review): the variable name looks mangled (`_lowercase`); in the original
# project this table is presumably consumed by setup tooling / runtime
# dependency-version checks — confirm before renaming.
_lowercase : Optional[Any] = {
    "Pillow": "Pillow<10.0.0",
    "accelerate": "accelerate>=0.20.3",
    "av": "av==9.2.0",
    "beautifulsoup4": "beautifulsoup4",
    "black": "black~=23.1",
    "codecarbon": "codecarbon==1.2.0",
    "cookiecutter": "cookiecutter==1.7.3",
    "dataclasses": "dataclasses",
    "datasets": "datasets!=2.5.0",
    "decord": "decord==0.6.0",
    "deepspeed": "deepspeed>=0.9.3",
    "diffusers": "diffusers",
    "dill": "dill<0.3.5",
    "evaluate": "evaluate>=0.2.0",
    "fairscale": "fairscale>0.3",
    "faiss-cpu": "faiss-cpu",
    "fastapi": "fastapi",
    "filelock": "filelock",
    "flax": "flax>=0.4.1,<=0.7.0",
    "ftfy": "ftfy",
    "fugashi": "fugashi>=1.0",
    "GitPython": "GitPython<3.1.19",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
    "importlib_metadata": "importlib_metadata",
    "ipadic": "ipadic>=1.0.0,<2.0",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
    "jaxlib": "jaxlib>=0.1.65,<=0.4.13",
    "jieba": "jieba",
    "kenlm": "kenlm",
    "keras-nlp": "keras-nlp>=0.3.1",
    "librosa": "librosa",
    "nltk": "nltk",
    "natten": "natten>=0.14.6",
    "numpy": "numpy>=1.17",
    "onnxconverter-common": "onnxconverter-common",
    "onnxruntime-tools": "onnxruntime-tools>=1.4.2",
    "onnxruntime": "onnxruntime>=1.4.0",
    "opencv-python": "opencv-python",
    "optuna": "optuna",
    "optax": "optax>=0.0.8,<=0.1.4",
    "packaging": "packaging>=20.0",
    "parameterized": "parameterized",
    "phonemizer": "phonemizer",
    "protobuf": "protobuf",
    "psutil": "psutil",
    "pyyaml": "pyyaml>=5.1",
    "pydantic": "pydantic<2",
    "pytest": "pytest>=7.2.0",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "python": "python>=3.8.0",
    "ray[tune]": "ray[tune]",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "rhoknp": "rhoknp>=1.1.0,<1.3.1",
    "rjieba": "rjieba",
    "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
    "ruff": "ruff>=0.0.241,<=0.0.259",
    "sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
    "sacremoses": "sacremoses",
    "safetensors": "safetensors>=0.3.1",
    "sagemaker": "sagemaker>=2.31.0",
    "scikit-learn": "scikit-learn",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "sigopt": "sigopt",
    "starlette": "starlette",
    "sudachipy": "sudachipy>=0.6.6",
    "sudachidict_core": "sudachidict_core>=20220729",
    "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
    "tensorflow": "tensorflow>=2.6,<2.14",
    "tensorflow-text": "tensorflow-text<2.14",
    "tf2onnx": "tf2onnx",
    "timeout-decorator": "timeout-decorator",
    "timm": "timm",
    "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
    "torch": "torch>=1.9,!=1.12.0",
    "torchaudio": "torchaudio",
    "torchvision": "torchvision",
    "pyctcdecode": "pyctcdecode>=0.4.0",
    "tqdm": "tqdm>=4.27",
    "unidic": "unidic>=1.0.2",
    "unidic_lite": "unidic_lite>=1.0.7",
    "urllib3": "urllib3<2.0.0",
    "uvicorn": "uvicorn",
}
| 625 | 1 |
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def snake_case__ ( __lowerCamelCase : int ):
    """Compute the Möbius function of the given integer.

    Returns 0 when the number has a squared prime factor; otherwise returns
    +1 or -1 according to whether it has an even or odd number of prime
    factors.
    """
    factor_list = prime_factors(__lowerCamelCase )
    if not is_square_free(__lowerCamelCase ):
        # A repeated prime factor forces the Möbius value to zero.
        return 0
    # Square-free: the sign is set by the parity of the factor count.
    return 1 if len(factor_list ) % 2 == 0 else -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 625 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : list[int] ):
    """Return the maximum product over all contiguous subarrays of the input.

    Uses the classic running-max/running-min scan: a negative element swaps
    the roles of the running maximum and minimum products.

    Fixes (mangled original): the body referenced ``numbers`` while the
    parameter was named ``__lowerCamelCase``, and only one of the three
    running values was initialized, so the loop raised NameError on
    ``min_till_now`` / ``max_prod``.

    Args:
        __lowerCamelCase: list (or tuple) of integers; empty input returns 0.

    Returns:
        The maximum contiguous-subarray product.

    Raises:
        ValueError: if the input is not a list/tuple of integers.
    """
    numbers = __lowerCamelCase
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple) ) or not all(
        isinstance(number, int ) for number in numbers ):
        raise ValueError('''numbers must be an iterable of integers''' )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            # a negative factor swaps which running product is largest
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number )
        min_till_now = min(number, min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now )
    return max_prod
| 625 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import layout for the Megatron-BERT subpackage: module -> exported
# names. NOTE(review): variable names look mangled (`_lowercase` vs the
# `_import_structure` referenced at the bottom) — as written the _LazyModule
# call references an undefined name; confirm against upstream.
_lowercase : Tuple = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: only the configuration entries above are exported.
    pass
else:
    _lowercase : int = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers resolve real imports; at runtime the module is
    # replaced with a _LazyModule in the else branch below.
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )
else:
    import sys

    _lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 625 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    '''Output container for the Flax ControlNet forward pass (used as the
    `FlaxControlNetOutput` return type further below).

    NOTE(review): the field declarations look mangled — both are named `_a`
    with placeholder value 42; originally these were presumably typed
    jnp.ndarray fields (e.g. down-block residuals and mid-block residual).
    Confirm against the upstream source.
    '''
    _a = 42
    _a = 42
class __SCREAMING_SNAKE_CASE ( nn.Module ):
    '''Flax module embedding the raw conditioning image into the UNet's first
    feature resolution: a conv-in, a stack of conv blocks (every second one
    strided 2x2), and a zero-initialized conv-out.

    NOTE(review): locals look machine-mangled (all `lowerCamelCase__`), so
    names such as `blocks` in setup and `embedding` in __call__ are undefined
    as written. Field names were also mangled to `_a`; based on usage they
    were presumably `conditioning_embedding_channels`, `block_out_channels`
    and `dtype`. Confirm against upstream.
    '''
    _a = 42
    _a = (1_6, 3_2, 9_6, 2_5_6)
    _a = jnp.floataa

    def snake_case ( self : Tuple )-> int:
        # setup(): conv-in at the first channel width.
        lowerCamelCase__ : Tuple =nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        lowerCamelCase__ : Dict =[]
        for i in range(len(self.block_out_channels ) - 1 ):
            # For each stage: one same-resolution conv, one 2x2-strided conv.
            lowerCamelCase__ : Dict =self.block_out_channels[i]
            lowerCamelCase__ : Dict =self.block_out_channels[i + 1]
            lowerCamelCase__ : List[str] =nn.Conv(
                lowerCamelCase, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(lowerCamelCase )
            lowerCamelCase__ : Optional[int] =nn.Conv(
                lowerCamelCase, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(lowerCamelCase )
        lowerCamelCase__ : Any =blocks
        # conv-out is zero-initialized (kernel and bias), so the conditioning
        # branch contributes nothing at initialization.
        lowerCamelCase__ : Optional[int] =nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )

    def __call__( self : Any, lowerCamelCase : int )-> List[str]:
        # Forward: conv-in -> silu -> (block -> silu)* -> conv-out.
        lowerCamelCase__ : Tuple =self.conv_in(lowerCamelCase )
        lowerCamelCase__ : Dict =nn.silu(lowerCamelCase )
        for block in self.blocks:
            lowerCamelCase__ : str =block(lowerCamelCase )
            lowerCamelCase__ : List[str] =nn.silu(lowerCamelCase )
        lowerCamelCase__ : Any =self.conv_out(lowerCamelCase )
        return embedding
@flax_register_to_config
class __SCREAMING_SNAKE_CASE ( nn.Module , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
_a = 3_2
_a = 4
_a = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_a = False
_a = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
_a = 2
_a = 8
_a = None
_a = 1_2_8_0
_a = 0.0
_a = False
_a = jnp.floataa
_a = True
_a = 0
_a = "rgb"
_a = (1_6, 3_2, 9_6, 2_5_6)
def snake_case ( self : str, lowerCamelCase : jax.random.KeyArray )-> FrozenDict:
# init input tensors
lowerCamelCase__ : int =(1, self.in_channels, self.sample_size, self.sample_size)
lowerCamelCase__ : int =jnp.zeros(lowerCamelCase, dtype=jnp.floataa )
lowerCamelCase__ : Union[str, Any] =jnp.ones((1,), dtype=jnp.intaa )
lowerCamelCase__ : str =jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa )
lowerCamelCase__ : Any =(1, 3, self.sample_size * 8, self.sample_size * 8)
lowerCamelCase__ : Optional[Any] =jnp.zeros(lowerCamelCase, dtype=jnp.floataa )
lowerCamelCase__ , lowerCamelCase__ : List[Any] =jax.random.split(lowerCamelCase )
lowerCamelCase__ : Dict ={'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )["params"]
def snake_case ( self : Any )-> Tuple:
lowerCamelCase__ : Optional[int] =self.block_out_channels
lowerCamelCase__ : Tuple =block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCamelCase__ : List[Any] =self.num_attention_heads or self.attention_head_dim
# input
lowerCamelCase__ : int =nn.Conv(
block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
# time
lowerCamelCase__ : str =FlaxTimesteps(
block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
lowerCamelCase__ : Dict =FlaxTimestepEmbedding(lowerCamelCase, dtype=self.dtype )
lowerCamelCase__ : List[Any] =FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )
lowerCamelCase__ : Dict =self.only_cross_attention
if isinstance(lowerCamelCase, lowerCamelCase ):
lowerCamelCase__ : int =(only_cross_attention,) * len(self.down_block_types )
if isinstance(lowerCamelCase, lowerCamelCase ):
lowerCamelCase__ : List[str] =(num_attention_heads,) * len(self.down_block_types )
# down
lowerCamelCase__ : Union[str, Any] =[]
lowerCamelCase__ : Dict =[]
lowerCamelCase__ : List[Any] =block_out_channels[0]
lowerCamelCase__ : List[Any] =nn.Conv(
lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(lowerCamelCase )
for i, down_block_type in enumerate(self.down_block_types ):
lowerCamelCase__ : List[Any] =output_channel
lowerCamelCase__ : str =block_out_channels[i]
lowerCamelCase__ : Dict =i == len(lowerCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCamelCase__ : str =FlaxCrossAttnDownBlockaD(
in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
else:
lowerCamelCase__ : List[Any] =FlaxDownBlockaD(
in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
down_blocks.append(lowerCamelCase )
for _ in range(self.layers_per_block ):
lowerCamelCase__ : Any =nn.Conv(
lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(lowerCamelCase )
if not is_final_block:
lowerCamelCase__ : Any =nn.Conv(
lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(lowerCamelCase )
lowerCamelCase__ : int =down_blocks
lowerCamelCase__ : List[str] =controlnet_down_blocks
# mid
lowerCamelCase__ : Tuple =block_out_channels[-1]
lowerCamelCase__ : List[Any] =FlaxUNetMidBlockaDCrossAttn(
in_channels=lowerCamelCase, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )
lowerCamelCase__ : List[str] =nn.Conv(
lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
def __call__( self : int, lowerCamelCase : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : str, lowerCamelCase : float = 1.0, lowerCamelCase : bool = True, lowerCamelCase : bool = False, )-> Union[FlaxControlNetOutput, Tuple]:
lowerCamelCase__ : int =self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowerCamelCase__ : int =jnp.flip(lowerCamelCase, axis=1 )
# 1. time
if not isinstance(lowerCamelCase, jnp.ndarray ):
lowerCamelCase__ : Any =jnp.array([timesteps], dtype=jnp.intaa )
elif isinstance(lowerCamelCase, jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCamelCase__ : List[str] =timesteps.astype(dtype=jnp.floataa )
lowerCamelCase__ : int =jnp.expand_dims(lowerCamelCase, 0 )
lowerCamelCase__ : Optional[Any] =self.time_proj(lowerCamelCase )
lowerCamelCase__ : Optional[Any] =self.time_embedding(lowerCamelCase )
# 2. pre-process
lowerCamelCase__ : Optional[int] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) )
lowerCamelCase__ : Dict =self.conv_in(lowerCamelCase )
lowerCamelCase__ : List[str] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) )
lowerCamelCase__ : int =self.controlnet_cond_embedding(lowerCamelCase )
sample += controlnet_cond
# 3. down
lowerCamelCase__ : Union[str, Any] =(sample,)
for down_block in self.down_blocks:
if isinstance(lowerCamelCase, lowerCamelCase ):
lowerCamelCase__ , lowerCamelCase__ : Dict =down_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train )
else:
lowerCamelCase__ , lowerCamelCase__ : Tuple =down_block(lowerCamelCase, lowerCamelCase, deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowerCamelCase__ : Optional[int] =self.mid_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train )
# 5. contronet blocks
lowerCamelCase__ : Optional[Any] =()
for down_block_res_sample, controlnet_block in zip(lowerCamelCase, self.controlnet_down_blocks ):
lowerCamelCase__ : Union[str, Any] =controlnet_block(lowerCamelCase )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowerCamelCase__ : List[str] =controlnet_down_block_res_samples
lowerCamelCase__ : List[str] =self.controlnet_mid_block(lowerCamelCase )
# 6. scaling
lowerCamelCase__ : Union[str, Any] =[sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowerCamelCase, mid_block_res_sample=lowerCamelCase )
| 625 | 1 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Batch-size constants (16 / 32).
# NOTE(review): both assignments bind the same mangled name `_lowercase`, so the
# first value (16) is immediately shadowed by 32 and nothing below reads either
# one — presumably these were distinct names (e.g. MAX_GPU_BATCH_SIZE /
# EVAL_BATCH_SIZE) before identifier mangling; confirm against the original script.
_lowercase : Union[str, Any] = 1_6
_lowercase : List[Any] = 3_2
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name_or_path: str = "bert-base-cased"):
    """Build train/eval dataloaders for GLUE MRPC, tokenized for the given model.

    Args:
        accelerator: active `Accelerator`; used to pick TPU-friendly padding.
        batch_size: per-device batch size for both splits.
        model_name_or_path: checkpoint whose tokenizer is used.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Train and evaluate a GLUE/MRPC classifier under Accelerate, optionally
    delegating optimizer/scheduler creation to an active DeepSpeed config.

    Args:
        config: dict with "lr", "num_epochs", "seed" and "batch_size".
        args: parsed CLI namespace (model_name_or_path, output_dir,
            performance_lower_bound, num_epochs).
    """
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer: use the DeepSpeed dummy when the DeepSpeed config
    # already declares an optimizer, otherwise a real AdamW.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler (dummy when DeepSpeed provides one)
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}

    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    # Drop the duplicated padding samples of the final batch.
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    """Parse CLI arguments and launch `training_function`."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    # Fixed hyper-parameters except for the CLI-controlled epoch count.
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 625 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the CLIP package: maps submodule name -> public
# symbols. Config/processor/tokenizer are always available; each optional
# backend contributes its symbols only when the backend is installed.
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]
if TYPE_CHECKING:
    # For type checkers, import everything eagerly so symbols resolve.
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this package's module object with a lazy module so
    # heavy submodules are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 625 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_lowercase : Any = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    r"""
    Constructs a CLIP image processor.

    Pipeline (each step individually switchable): optional RGB conversion,
    shortest-edge resize, center crop, rescale to [0, 1] and normalization
    with the CLIP mean/std.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Defaults follow the original CLIP preprocessing: 224 shortest edge,
        # 224x224 center crop, OpenAI CLIP normalization statistics.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals `size["shortest_edge"]`, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to `size["height"]` x `size["width"]`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize per channel: (image - mean) / std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Run the full preprocessing pipeline on one image or a batch.

        Every argument defaults to the value configured in `__init__`; passing
        a non-None value overrides it for this call only.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 625 |
"""simple docstring"""
import os
def solution():
    """Return the total of all name scores in `p022_names.txt` (Project Euler 22).

    A name's score is the sum of its letters' alphabetical values
    (A=1 ... Z=26) multiplied by the name's 1-based position in the
    alphabetically sorted list.
    """
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
    names = names.replace('"', "").split(",")
    names.sort()

    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64  # ord("A") - 1 == 64, so A->1 ... Z->26
        total_score += (i + 1) * name_score
        name_score = 0  # reset the per-name accumulator
    return total_score


if __name__ == "__main__":
    print(solution())
| 625 | 1 |
"""simple docstring"""
_lowercase : str = 0 # The first color of the flag.
_lowercase : Dict = 1 # The second color of the flag.
_lowercase : Tuple = 2 # The third color of the flag.
_lowercase : Optional[int] = (red, white, blue)
def snake_case__ ( __lowerCamelCase : list ):
"""simple docstring"""
if not sequence:
return []
if len(__lowerCamelCase ) == 1:
return list(__lowerCamelCase )
lowerCamelCase__ : List[Any] =0
lowerCamelCase__ : Dict =len(__lowerCamelCase ) - 1
lowerCamelCase__ : Tuple =0
while mid <= high:
if sequence[mid] == colors[0]:
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =sequence[high], sequence[mid]
high -= 1
else:
lowerCamelCase__ : Dict =f'''The elements inside the sequence must contains only {colors} values'''
raise ValueError(__lowerCamelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : Optional[Any] = input("Enter numbers separated by commas:\n").strip()
_lowercase : int = [int(item.strip()) for item in user_input.split(",")]
print(f'{dutch_national_flag_sort(unsorted)}')
| 625 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A binary-tree node holding an integer value and optional children.

    Restored name: the original mangled name collided with the tree class
    defined below (both were unreachable), and the file's own annotations
    refer to `Node`.
    """

    def __init__(self, value: int) -> None:
        self.value = value
        # Children start empty and are attached by the caller.
        self.left: "Node | None" = None
        self.right: "Node | None" = None
class Tree:
    """Wraps a binary tree rooted at `tree` and iterates over the total of
    its node values.

    Restored name: the original mangled name collided with the node class
    above; the DFS method is named `depth_first_search` because the body
    already calls `self.depth_first_search`.
    """

    def __init__(self, tree: "Node") -> None:
        self.tree = tree

    def depth_first_search(self, node: "Node | None") -> int:
        """Return the sum of `value` over the subtree rooted at `node` (0 for None)."""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> "Iterator[int]":
        # Yields a single item: the total of all node values.
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 625 | 1 |
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Probabilistic Miller-Rabin primality test.

    Args:
        n: integer to test.
        prec: number of random witness rounds; the chance of a composite
            passing is at most 4**-prec.

    Returns:
        True if `n` is (very probably) prime, False if certainly composite.
    """
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd: write n - 1 = d * 2**exp with d odd.
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for pow()
        exp += 1
    # n - 1 = d * (2**exp)
    for _ in range(prec):
        a = random.randint(2, n - 1)
        b = pow(a, d, n)  # built-in three-argument pow: modular exponentiation
        if b == 1:
            continue  # a is not a witness for this round
        composite_witness = True
        for _ in range(exp):
            if b == n - 1:
                composite_witness = False
                break
            b = b * b % n
        if composite_witness:
            return False
    return True
if __name__ == "__main__":
    # Read an inclusive upper bound from stdin; abs() tolerates a leading minus.
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 625 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
_lowercase : List[str] = logging.getLogger(__name__)
def save_model(model, dirpath):
    """Save a pruned model to `dirpath` via `model.save_pretrained`.

    If the directory already exists, stale `config.json` /
    `pytorch_model.bin` files are removed first so the new checkpoint
    fully replaces the old one; otherwise the directory is created.
    """
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Return -sum(p * log(p)) over the last dimension of `p`.

    Args:
        p: probability tensor; entries equal to 0 contribute 0 (0*log 0 := 0).
        unlogit: if True, square `p` first before computing the plogp terms.
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    # 0 * log(0) produces NaN; define it as 0 for entropy purposes.
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_ad_tensor(tensor):
    """Log a 2-D tensor as a tab-separated table, one row per layer.

    Long tensors are printed as integers, everything else with 5 decimals.
    """
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Run the model over `eval_dataloader` and accumulate per-head statistics.

    Returns:
        (attn_entropy, head_importance, total_loss) — per-layer/head attention
        entropies, gradient-based head importance scores, and the summed LM loss.
    """
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    # NOTE(review): the mangled original lost the index target of this scatter;
    # restored as rank-by-descending-importance — confirm against the upstream script.
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively zero out the least-important attention heads until the LM
    score drops below ``args.masking_threshold`` * original score.

    Args:
        args: parsed CLI namespace (reads ``masking_threshold``,
            ``masking_amount``, ``output_dir``).
        model: the (possibly parallel-wrapped) language model.
        eval_dataloader: dataloader used to (re)compute head importance.

    Returns:
        The last head mask whose score was still above the threshold
        (1.0 = keep head, 0.0 = masked head); also saved to
        ``output_dir/head_mask.npy``.
    """
    # NOTE(review): the mangled original duplicated every parameter name
    # (a SyntaxError) and assigned all locals to one identifier; distinct
    # names restored from the reads in the body.
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the heads zeroed in ``head_mask`` and report the
    parameter-count and speed gains versus plain masking.

    Args:
        args: parsed CLI namespace (reads ``output_dir``).
        model: model exposing ``prune_heads`` (e.g. a GPT-2 LM head model).
        eval_dataloader: dataloader for the timed evaluation passes.
        head_mask: per-layer/per-head mask (1.0 = keep, 0.0 = prune).
    """
    # Scoring with the mask applied (baseline timing).
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    # Map each layer to the indices of its masked (to-be-pruned) heads.
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        # squeeze() collapses a single-head list to a bare int - re-wrap it.
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    # Scoring after real pruning (no mask needed any more).
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    """Entry point: parse CLI args, load a pretrained LM, compute
    attention-head importance on the evaluation data, then optionally mask
    and prune heads (``--try_masking`` with 0 < ``--masking_threshold`` < 1).
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    # NOTE(review): `GPTaLMHeadModel` is the identifier imported by this file
    # (presumably a GPT-2 LM-head model) - kept as-is; confirm against the
    # file's import block.
    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),  # was `np.intaa` (mangled digits) - that attribute does not exist
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
# Script entry point.
# NOTE(review): `main` must exist at module level; in this mangled file the
# function definitions above are all named `snake_case__` - confirm the
# intended binding.
if __name__ == "__main__":
    main()
# | 625 | 1 |  (dataset-row separator artifact - not part of the source)
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3):
    """Build and simulate an n-qubit Quantum Fourier Transform circuit.

    Applies the textbook QFT (Hadamard on each qubit followed by a
    controlled-phase ladder, then the final qubit-order swaps), measures all
    qubits, and runs the circuit on Aer's qasm simulator with 10000 shots.

    Args:
        number_of_qubits: circuit width; must be a positive integer <= 10.

    Returns:
        The measurement counts from the simulation job.

    Raises:
        TypeError: if ``number_of_qubits`` is a string.
        ValueError: if ``number_of_qubits`` is <= 0, non-integral, or > 10.
    """
    # NOTE(review): the mangled original collapsed both isinstance() arguments
    # into the parameter name; `str` restored per the TypeError message.
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(number_of_qubits):
        # Hadamard on the highest remaining qubit, then the phase ladder
        # over the qubits below it.
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # Reverse the qubit order to complete the QFT.
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
    # Demo: run a 3-qubit QFT on the simulator and report the counts.
    counts = quantum_fourier_transform(3)
    print(f"Total count for quantum fourier transform state is: {counts}")
# | 625 |  (dataset-row separator artifact - not part of the source)
"""simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax(tax_checkpoint_path, config_name, flax_dump_folder_path):
    """Convert a T5X (t5 / longt5) checkpoint into a Flax seq2seq model and
    save it to ``flax_dump_folder_path``.

    Args:
        tax_checkpoint_path: path to the T5X checkpoint directory.
        config_name: model config name or path (t5, or longt5 with a
            ``local`` / ``transient-global`` encoder attention type).
        flax_dump_folder_path: output directory for the converted Flax model.

    Raises:
        ValueError: if the config is not a supported t5/longt5 variant.
    """
    # NOTE(review): the mangled original duplicated parameter names
    # (a SyntaxError) and lost the Flax-side subscript targets; names and
    # targets restored from the surrounding reads - verify against the
    # upstream T5X-to-Flax conversion script before trusting the output.
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config)
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path)

    # T5 v1.1+/LongT5 checkpoints split the feed-forward input matrix in two.
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]

    # Single if/elif chain: the mangled original used a second bare `if`,
    # which made plain `model_type == "t5"` fall through to the ValueError.
    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global]."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        tax_attention_key = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        tax_attention_out = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        tax_attention_query = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        tax_attention_value = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            tax_global_layer_norm = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        tax_attention_layer_norm = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_1 = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            tax_mlp_wi = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        tax_mlp_wo = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        tax_mlp_layer_norm = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = tax_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = tax_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = tax_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = tax_attention_value
        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = tax_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"]["weight"] = (
                tax_global_layer_norm
            )

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = tax_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = tax_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = tax_mlp_wi
        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = tax_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = tax_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    tax_encoder_rel_embedding = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = tax_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        tax_encoder_global_rel_embedding = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"][
            "embedding"
        ] = tax_encoder_global_rel_embedding

    # Assigning
    tax_encoder_norm = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = tax_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        tax_attention_key = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        tax_attention_out = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        tax_attention_query = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        tax_attention_value = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        tax_pre_attention_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        tax_enc_dec_attention_module = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        tax_enc_dec_attention_key = tax_enc_dec_attention_module["key"]["kernel"]
        tax_enc_dec_attention_out = tax_enc_dec_attention_module["out"]["kernel"]
        tax_enc_dec_attention_query = tax_enc_dec_attention_module["query"]["kernel"]
        tax_enc_dec_attention_value = tax_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        tax_cross_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_1 = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            tax_mlp_wi = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        tax_mlp_wo = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        tax_mlp_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = tax_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = tax_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = tax_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = tax_attention_value
        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = tax_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = tax_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = tax_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = tax_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = tax_enc_dec_attention_value
        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = tax_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = tax_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = tax_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = tax_mlp_wi
        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = tax_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = tax_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    tax_decoder_norm = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = tax_decoder_norm

    # Only for layer 0:
    tax_decoder_rel_embedding = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = tax_decoder_rel_embedding

    # Token Embeddings
    tax_token_embeddings = tax_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = tax_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was sucessfully converted!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    # `args.tax_checkpoint_path` (mangled) could never exist: the flag above
    # is `--t5x_checkpoint_path`, so argparse stores `t5x_checkpoint_path`.
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
# | 625 | 1 |  (dataset-row separator artifact - not part of the source)
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger; the mangled name `_lowercase` is read as `logger`
# elsewhere in this script.
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    """Build a `MaskFormerConfig` for the given checkpoint name.

    Uses a Swin-Tiny backbone config and selects ``num_labels`` (plus the
    matching id2label file on the Hub) from substrings of ``model_name``.

    Args:
        model_name: original checkpoint name, e.g. containing "ade", "coco",
            "cityscapes" or "vistas".

    Returns:
        The populated `MaskFormerConfig`.
    """
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    # NOTE(review): id2label is computed but never attached to `config` in the
    # original block - confirm whether `config.id2label` should be set here.
    return config
def create_rename_keys(config):
    """Return the (old_name, new_name) pairs mapping original MaskFormer/Swin
    checkpoint keys to HF Transformers parameter names.

    Args:
        config: MaskFormer-style config; reads ``config.backbone_config.depths``
            and ``config.decoder_config.decoder_layers``.

    Returns:
        List of (source_key, target_key) tuples, in application order.
    """
    rename_keys = []
    # stem
    # fmt: off
    rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias"))
        if i < 3:
            rename_keys.append((f"backbone.layers.{i}.downsample.reduction.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.layers.{i}.downsample.norm.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.layers.{i}.downsample.norm.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias"))
    # FPN
    rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight"))
    rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight"))
    rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias"))
    for source_index, target_index in zip(range(3, 0, -1), range(0, 3)):
        rename_keys.append((f"sem_seg_head.adapter_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight"))
        rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight"))
        rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias"))
        rename_keys.append((f"sem_seg_head.layer_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight"))
        rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight"))
        rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias"))
    rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight"))
    rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias"))
    # Transformer decoder
    for idx in range(config.decoder_config.decoder_layers):
        # self-attention out projection
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias"))
        # cross-attention out projection
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias"))
        # MLP 1
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", f"model.transformer_module.decoder.layers.{idx}.fc1.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", f"model.transformer_module.decoder.layers.{idx}.fc1.bias"))
        # MLP 2
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", f"model.transformer_module.decoder.layers.{idx}.fc2.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", f"model.transformer_module.decoder.layers.{idx}.fc2.bias"))
        # layernorm 1 (self-attention layernorm)
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias"))
        # layernorm 2 (cross-attention layernorm)
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias"))
        # layernorm 3 (final layernorm)
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias"))
    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight"))
    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias"))
    # heads on top
    rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight"))
    rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight"))
    rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias"))
    rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight"))
    rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias"))
    for i in range(3):
        rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.weight", f"mask_embedder.{i}.0.weight"))
        rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.bias", f"mask_embedder.{i}.0.bias"))
    # fmt: on
    return rename_keys
def snake_case__ ( dct , old , new ):
    """Rename a key of ``dct`` in place: pop ``old`` and store its value under ``new``.

    Fixes the mangled original, whose three parameters all shared one name (a
    SyntaxError) and which dropped the popped value instead of re-inserting it.
    """
    # Pop first so the old key disappears even when old == new.
    val = dct.pop(old )
    dct[new] = val
def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : int ):
    """Split each Swin backbone block's fused ``attn.qkv`` weight/bias into separate
    query/key/value slices (query first, then key, then value).

    NOTE(review): this copy is machine-mangled. Both parameters share the name
    ``__lowerCamelCase`` (a SyntaxError), the body reads ``backbone_config``,
    ``state_dict``, ``num_features``, ``dim``, ``in_proj_weight`` and
    ``in_proj_bias`` which are never bound under those names, and every computed
    slice is assigned to the throwaway ``lowerCamelCase__`` instead of being
    written back into the state dict under the converted HF key names. Restore
    the parameter names (``state_dict, backbone_config``) and the state-dict
    writes from the upstream conversion script before using.
    """
    # Per-stage feature dims: embed_dim doubles at every stage.
    lowerCamelCase__ : Any =[int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        lowerCamelCase__ : Any =num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of the input projection layer (in the original
            # implementation, q/k/v are stored as a single fused matrix + bias)
            lowerCamelCase__ : Dict =state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' )
            lowerCamelCase__ : Optional[int] =state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' )
            # next, slice out query, keys and values (in that order)
            lowerCamelCase__ : Optional[int] =in_proj_weight[:dim, :]
            lowerCamelCase__ : Optional[Any] =in_proj_bias[: dim]
            lowerCamelCase__ : Optional[Any] =in_proj_weight[
                dim : dim * 2, :
            ]
            lowerCamelCase__ : str =in_proj_bias[
                dim : dim * 2
            ]
            lowerCamelCase__ : Any =in_proj_weight[
                -dim :, :
            ]
            lowerCamelCase__ : Optional[Any] =in_proj_bias[-dim :]
            # fmt: on
def snake_case__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] ):
    """Split the decoder's fused self-attention and cross-attention ``in_proj``
    weight/bias into query/key/value slices for every decoder layer.

    NOTE(review): machine-mangled copy — both parameters share one name (a
    SyntaxError), the body reads ``config``, ``state_dict``, ``hidden_size``,
    ``in_proj_weight`` and ``in_proj_bias`` that are never bound, and each slice
    lands in the throwaway name ``lowerCamelCase__`` rather than being stored
    under the converted state-dict keys. Restore from the upstream script.
    """
    # fmt: off
    lowerCamelCase__ : Any =config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of the self-attention input projection layer
        # (stored upstream as a single fused matrix + bias)
        lowerCamelCase__ : str =state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' )
        lowerCamelCase__ : Dict =state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' )
        # slice out query, keys and values (in that order)
        lowerCamelCase__ : Optional[Any] =in_proj_weight[: hidden_size, :]
        lowerCamelCase__ : int =in_proj_bias[:config.hidden_size]
        lowerCamelCase__ : Optional[Any] =in_proj_weight[hidden_size : hidden_size * 2, :]
        lowerCamelCase__ : Union[str, Any] =in_proj_bias[hidden_size : hidden_size * 2]
        lowerCamelCase__ : List[Any] =in_proj_weight[-hidden_size :, :]
        lowerCamelCase__ : int =in_proj_bias[-hidden_size :]
        # read in weights + bias of the cross-attention input projection layer
        # (also stored upstream as a single fused matrix + bias)
        lowerCamelCase__ : Dict =state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' )
        lowerCamelCase__ : Optional[Any] =state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' )
        # slice out query, keys and values (in that order)
        lowerCamelCase__ : str =in_proj_weight[: hidden_size, :]
        lowerCamelCase__ : Any =in_proj_bias[:config.hidden_size]
        lowerCamelCase__ : List[Any] =in_proj_weight[hidden_size : hidden_size * 2, :]
        lowerCamelCase__ : Optional[Any] =in_proj_bias[hidden_size : hidden_size * 2]
        lowerCamelCase__ : Tuple =in_proj_weight[-hidden_size :, :]
        lowerCamelCase__ : Any =in_proj_bias[-hidden_size :]
    # fmt: on
def snake_case__ ( ):
    """Download the standard COCO verification image (two cats on a couch).

    Fixes the mangled original, which returned the undefined name ``im`` and
    passed the undefined ``__lowerCamelCase`` as both URL and ``stream`` flag.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    # Stream so PIL can decode directly from the raw response file object.
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : bool = False ):
    """Convert an original MaskFormer pickle checkpoint to the HF format, verify a
    forward pass against hard-coded logits, and optionally save / push the result.

    NOTE(review): machine-mangled copy — the four parameters (model name,
    checkpoint path, dump folder, push flag, judging by the callers) all share one
    name, which is a SyntaxError, and most intermediate results are bound to the
    throwaway ``lowerCamelCase__`` while later lines read the intended names
    (``data``, ``rename_keys``, ``model``, ``model_name``, ``outputs``, ...) that
    are never defined here. Restore from the upstream conversion script.
    """
    lowerCamelCase__ : int =get_maskformer_config(__lowerCamelCase )
    # load original state_dict from the pickled checkpoint
    with open(__lowerCamelCase , '''rb''' ) as f:
        lowerCamelCase__ : Dict =pickle.load(__lowerCamelCase )
    lowerCamelCase__ : List[str] =data['''model''']
    # for name, param in state_dict.items():
    # print(name, param.shape)
    # rename keys to the HF naming scheme
    lowerCamelCase__ : Optional[Any] =create_rename_keys(__lowerCamelCase )
    for src, dest in rename_keys:
        rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
    read_in_swin_q_k_v(__lowerCamelCase , config.backbone_config )
    read_in_decoder_q_k_v(__lowerCamelCase , __lowerCamelCase )
    # update numpy arrays to torch tensors
    for key, value in state_dict.items():
        lowerCamelCase__ : int =torch.from_numpy(__lowerCamelCase )
    # load 🤗 model
    lowerCamelCase__ : List[str] =MaskFormerForInstanceSegmentation(__lowerCamelCase )
    model.eval()
    for name, param in model.named_parameters():
        print(__lowerCamelCase , param.shape )
    lowerCamelCase__ , lowerCamelCase__ : int =model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
    # only the backbone's final layernorm is expected to be newly initialized
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(__lowerCamelCase ) == 0, f'''Unexpected keys: {unexpected_keys}'''
    # verify results on the COCO test image; ignore index depends on the dataset
    lowerCamelCase__ : Optional[Any] =prepare_img()
    if "vistas" in model_name:
        lowerCamelCase__ : List[Any] =65
    elif "cityscapes" in model_name:
        lowerCamelCase__ : str =65535
    else:
        lowerCamelCase__ : Any =255
    lowerCamelCase__ : List[str] =True if '''ade''' in model_name else False
    lowerCamelCase__ : Dict =MaskFormerImageProcessor(ignore_index=__lowerCamelCase , reduce_labels=__lowerCamelCase )
    lowerCamelCase__ : Tuple =image_processor(__lowerCamelCase , return_tensors='''pt''' )
    lowerCamelCase__ : str =model(**__lowerCamelCase )
    print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        # reference logits recorded from the original implementation
        lowerCamelCase__ : Optional[Any] =torch.tensor(
            [[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
        model.save_pretrained(__lowerCamelCase )
        image_processor.save_pretrained(__lowerCamelCase )
    if push_to_hub:
        print('''Pushing model and image processor to the hub...''' )
        model.push_to_hub(f'''nielsr/{model_name}''' )
        image_processor.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
    # Build the CLI for the MaskFormer checkpoint conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        # NOTE: this help text used to be wrapped in a 1-tuple ("...",), which
        # argparse cannot format; it must be a plain string.
        help="Name of the MaskFormer model you'd like to convert",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
        type=str,
        help="Path to the original state dict (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    # The mangled original assigned the parser and parsed args to `_lowercase`
    # while the following lines read `parser` / `args` — restore the real names.
    args = parser.parse_args()
    # NOTE(review): the conversion entry point above is named `snake_case__` in this
    # copy; `convert_maskformer_checkpoint` must resolve to it for this to run.
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 625 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
    """Model-tester helper that builds a small ConvNext-backed UperNet config plus
    random pixel inputs/labels for the unit tests below.

    NOTE(review): machine-mangled copy — ``__init__`` repeats the parameter name
    ``lowerCamelCase`` (a SyntaxError), and the bodies assign to the throwaway
    name ``lowerCamelCase__`` while reading attributes/locals (``parent``,
    ``batch_size``, ``config_and_inputs``, ...) that are never bound. The
    intended ``self.<attr> = <param>`` assignments must be restored upstream.
    """

    def __init__( self : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : List[str]=13, lowerCamelCase : List[Any]=32, lowerCamelCase : Dict=3, lowerCamelCase : int=4, lowerCamelCase : str=[10, 20, 30, 40], lowerCamelCase : Any=[2, 2, 3, 2], lowerCamelCase : int=True, lowerCamelCase : int=True, lowerCamelCase : str=37, lowerCamelCase : Optional[int]="gelu", lowerCamelCase : Optional[int]=10, lowerCamelCase : Any=0.02, lowerCamelCase : Union[str, Any]=["stage2", "stage3", "stage4"], lowerCamelCase : Optional[int]=3, lowerCamelCase : Tuple=None, )-> List[str]:
        lowerCamelCase__ : List[str] =parent
        lowerCamelCase__ : Tuple =batch_size
        lowerCamelCase__ : str =image_size
        lowerCamelCase__ : Any =num_channels
        lowerCamelCase__ : Tuple =num_stages
        lowerCamelCase__ : List[str] =hidden_sizes
        lowerCamelCase__ : Any =depths
        lowerCamelCase__ : Union[str, Any] =is_training
        lowerCamelCase__ : Tuple =use_labels
        lowerCamelCase__ : int =intermediate_size
        lowerCamelCase__ : Optional[int] =hidden_act
        lowerCamelCase__ : Dict =type_sequence_label_size
        lowerCamelCase__ : Tuple =initializer_range
        lowerCamelCase__ : Any =out_features
        lowerCamelCase__ : Tuple =num_labels
        lowerCamelCase__ : Optional[int] =scope
        lowerCamelCase__ : Optional[int] =num_stages

    # Build (config, pixel_values, labels) with random tensors.
    def snake_case ( self : str )-> Optional[int]:
        lowerCamelCase__ : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase__ : Tuple =None
        if self.use_labels:
            lowerCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size], self.type_sequence_label_size )
        lowerCamelCase__ : int =self.get_config()
        return config, pixel_values, labels

    # Backbone config for the tiny ConvNext encoder.
    def snake_case ( self : Union[str, Any] )-> Any:
        return ConvNextConfig(
            num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )

    # Full UperNet config wrapping the backbone config above.
    def snake_case ( self : Union[str, Any] )-> Any:
        return UperNetConfig(
            backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=lowerCamelCase, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=lowerCamelCase, loss_ignore_index=255, num_labels=self.num_labels, )

    # Forward a batch and check the logits shape.
    # NOTE(review): duplicate `lowerCamelCase` parameters here as well (SyntaxError).
    def snake_case ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : List[Any] )-> Tuple:
        lowerCamelCase__ : List[str] =UperNetForSemanticSegmentation(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        lowerCamelCase__ : int =model(lowerCamelCase )
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )

    # Split prepared inputs into (config, inputs_dict) for the common tests.
    # NOTE(review): the annotated tuple-unpacking below is itself a SyntaxError
    # (only a single target may carry an annotation).
    def snake_case ( self : Any )-> Tuple:
        lowerCamelCase__ : Dict =self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) : Any =config_and_inputs
        lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    """Common model/pipeline tests for UperNetForSemanticSegmentation.

    NOTE(review): machine-mangled copy — the class attributes below were all
    renamed to ``_a`` (each assignment shadows the previous one; upstream these
    are ``all_model_classes``, ``pipeline_model_mapping`` and several boolean
    ``test_*``/``has_*`` flags), and method bodies bind results to the throwaway
    ``lowerCamelCase__`` while reading the intended names.
    """

    _a = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    _a = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
    _a = False
    _a = False
    _a = False
    _a = False
    _a = False
    _a = False

    # setUp: build the model tester and config tester.
    def snake_case ( self : Optional[int] )-> Optional[int]:
        lowerCamelCase__ : Optional[Any] =UperNetModelTester(self )
        lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 )

    # Exercise the standard config round-trip checks.
    def snake_case ( self : Optional[int] )-> Optional[int]:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def snake_case ( self : List[str] )-> Dict:
        return

    # forward() must accept `pixel_values` as its first argument.
    def snake_case ( self : Optional[int] )-> List[str]:
        lowerCamelCase__ , lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
            lowerCamelCase__ : Tuple =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Tuple =[*signature.parameters.keys()]
            lowerCamelCase__ : List[Any] =['''pixel_values''']
            self.assertListEqual(arg_names[:1], lowerCamelCase )

    def snake_case ( self : Any )-> Union[str, Any]:
        lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase )

    @unittest.skip(reason='''UperNet does not use inputs_embeds''' )
    def snake_case ( self : Optional[Any] )-> List[Any]:
        pass

    @unittest.skip(reason='''UperNet does not support input and output embeddings''' )
    def snake_case ( self : Any )-> List[str]:
        pass

    @unittest.skip(reason='''UperNet does not have a base model''' )
    def snake_case ( self : int )-> Any:
        pass

    @unittest.skip(reason='''UperNet does not have a base model''' )
    def snake_case ( self : Dict )-> str:
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def snake_case ( self : List[Any] )-> List[str]:
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def snake_case ( self : Tuple )-> str:
        pass

    # Check hidden-state count and the spatial shape of the first feature map.
    def snake_case ( self : Optional[int] )-> List[str]:
        # NOTE(review): the helper below repeats `lowerCamelCase` three times (SyntaxError).
        def check_hidden_states_output(lowerCamelCase : Dict, lowerCamelCase : int, lowerCamelCase : List[str] ):
            lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
            model.to(lowerCamelCase )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : Optional[Any] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
            lowerCamelCase__ : Optional[Any] =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowerCamelCase__ : List[str] =self.model_tester.num_stages
            self.assertEqual(len(lowerCamelCase ), expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        lowerCamelCase__ , lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Optional[int] =True
            check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__ : Optional[Any] =True
            check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )

    # With zero-init configs, every trainable parameter mean must be exactly 0 or 1.
    def snake_case ( self : Any )-> List[Any]:
        lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : str =_config_zero_init(lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] =_config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Optional[int] =model_class(config=lowerCamelCase )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )

    @unittest.skip(reason='''UperNet does not have tied weights''' )
    def snake_case ( self : Any )-> str:
        pass

    # Smoke-test loading the first published checkpoint.
    @slow
    def snake_case ( self : int )-> Union[str, Any]:
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : str =UperNetForSemanticSegmentation.from_pretrained(lowerCamelCase )
            self.assertIsNotNone(lowerCamelCase )
def snake_case__ ( ):
    """Download the ADE20k fixture image from the hub and return it as an RGB PIL image.

    Fixes the mangled original, which opened the undefined name ``__lowerCamelCase``
    and returned the undefined ``image`` instead of the downloaded file.
    """
    filepath = hf_hub_download(
        repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
    image = Image.open(filepath ).convert('''RGB''' )
    return image
@require_torch
@require_vision
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration tests: run the published Swin- and ConvNext-based UperNet
    checkpoints on the ADE20k fixture image and compare a 3x3 logits corner
    against recorded reference values.

    NOTE(review): machine-mangled copy — results are bound to the throwaway
    ``lowerCamelCase__`` while the following lines read the intended names
    (``processor``, ``model``, ``image``, ``inputs``, ``outputs``,
    ``expected_shape``, ``expected_slice``) and pass the undefined local
    ``lowerCamelCase`` where those values belong.
    """

    # openmmlab/upernet-swin-tiny reference check
    def snake_case ( self : str )-> Union[str, Any]:
        lowerCamelCase__ : List[Any] =AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
        lowerCamelCase__ : List[Any] =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(lowerCamelCase )
        lowerCamelCase__ : List[Any] =prepare_img()
        lowerCamelCase__ : List[Any] =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
        with torch.no_grad():
            lowerCamelCase__ : List[Any] =model(**lowerCamelCase )
        lowerCamelCase__ : Optional[int] =torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape, lowerCamelCase )
        lowerCamelCase__ : Dict =torch.tensor(
            [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) )

    # openmmlab/upernet-convnext-tiny reference check
    def snake_case ( self : Optional[int] )-> Optional[Any]:
        lowerCamelCase__ : str =AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
        lowerCamelCase__ : Tuple =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(lowerCamelCase )
        lowerCamelCase__ : Dict =prepare_img()
        lowerCamelCase__ : Any =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
        with torch.no_grad():
            lowerCamelCase__ : Any =model(**lowerCamelCase )
        lowerCamelCase__ : Dict =torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape, lowerCamelCase )
        lowerCamelCase__ : List[str] =torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) )
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow Flax Stable Diffusion 2 integration tests: generate with the default
    scheduler and with DPMSolver++ and compare a pixel slice against recorded
    reference values.

    NOTE(review): machine-mangled copy — pipeline, prompt, inputs, RNG and image
    results are all bound to the throwaway ``lowerCamelCase__`` while later lines
    read the intended names (``sd_pipe``, ``prompt``, ``num_samples``,
    ``prompt_ids``, ``params``, ``rng``, ``images``, ``image_slice``,
    ``output_slice``, ``expected_slice``) and pass the undefined local
    ``lowerCamelCase`` as arguments.
    """

    def snake_case ( self : Optional[Any] )-> List[str]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    # Default (PNDM) scheduler generation check at 768x768, bfloat16.
    def snake_case ( self : List[Any] )-> Optional[int]:
        lowerCamelCase__ , lowerCamelCase__ : List[str] =FlaxStableDiffusionPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2''', revision='''bf16''', dtype=jnp.bfloataa, )
        lowerCamelCase__ : str ='''A painting of a squirrel eating a burger'''
        lowerCamelCase__ : List[str] =jax.device_count()
        lowerCamelCase__ : Dict =num_samples * [prompt]
        lowerCamelCase__ : Any =sd_pipe.prepare_inputs(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =replicate(lowerCamelCase )
        lowerCamelCase__ : Optional[int] =shard(lowerCamelCase )
        lowerCamelCase__ : Optional[int] =jax.random.PRNGKey(0 )
        lowerCamelCase__ : Tuple =jax.random.split(lowerCamelCase, jax.device_count() )
        lowerCamelCase__ : Tuple =sd_pipe(lowerCamelCase, lowerCamelCase, lowerCamelCase, num_inference_steps=25, jit=lowerCamelCase )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        lowerCamelCase__ : int =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        lowerCamelCase__ : int =images[0, 253:256, 253:256, -1]
        lowerCamelCase__ : Optional[Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
        lowerCamelCase__ : int =jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2

    # Same generation with the DPMSolver++ multistep scheduler.
    def snake_case ( self : int )-> Tuple:
        lowerCamelCase__ : List[Any] ='''stabilityai/stable-diffusion-2'''
        lowerCamelCase__ , lowerCamelCase__ : Any =FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCamelCase, subfolder='''scheduler''' )
        lowerCamelCase__ , lowerCamelCase__ : Tuple =FlaxStableDiffusionPipeline.from_pretrained(
            lowerCamelCase, scheduler=lowerCamelCase, revision='''bf16''', dtype=jnp.bfloataa, )
        lowerCamelCase__ : List[Any] =scheduler_params
        lowerCamelCase__ : int ='''A painting of a squirrel eating a burger'''
        lowerCamelCase__ : str =jax.device_count()
        lowerCamelCase__ : List[str] =num_samples * [prompt]
        lowerCamelCase__ : Optional[int] =sd_pipe.prepare_inputs(lowerCamelCase )
        lowerCamelCase__ : str =replicate(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =shard(lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] =jax.random.PRNGKey(0 )
        lowerCamelCase__ : Optional[int] =jax.random.split(lowerCamelCase, jax.device_count() )
        lowerCamelCase__ : Optional[int] =sd_pipe(lowerCamelCase, lowerCamelCase, lowerCamelCase, num_inference_steps=25, jit=lowerCamelCase )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        lowerCamelCase__ : str =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        lowerCamelCase__ : int =images[0, 253:256, 253:256, -1]
        lowerCamelCase__ : str =jnp.asarray(jax.device_get(image_slice.flatten() ) )
        lowerCamelCase__ : Tuple =jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 625 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
_a = ['onnx']
def __init__( self : List[str], *lowerCamelCase : Union[str, Any], **lowerCamelCase : str )-> Optional[int]:
requires_backends(self, ['''onnx'''] )
@classmethod
def snake_case ( cls : List[str], *lowerCamelCase : Any, **lowerCamelCase : Union[str, Any] )-> Optional[int]:
requires_backends(cls, ['''onnx'''] )
@classmethod
def snake_case ( cls : Union[str, Any], *lowerCamelCase : Tuple, **lowerCamelCase : Tuple )-> Optional[int]:
requires_backends(cls, ['''onnx'''] )
| 625 | 1 |
"""simple docstring"""
import os
from collections.abc import Iterator
def snake_case__ ( __lowerCamelCase : str = "." ) -> Iterator[str]:
    """Yield the paths of all .py / .ipynb files under *__lowerCamelCase* worth indexing.

    ``scripts`` directories, directories starting with ``.`` or ``_``, and
    ``__init__.py`` files are skipped. Fixes the mangled original, which tested
    the extension of (and joined) the root argument instead of each file name,
    and which lost the in-place directory prune.
    """
    for dir_path, dir_names, filenames in os.walk(__lowerCamelCase ):
        # Prune in place so os.walk does not descend into excluded directories.
        dir_names[:] = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename )[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path , filename ).lstrip('''./''' )
def snake_case__ ( __lowerCamelCase : int ) -> str:
    """Return the markdown prefix for nesting depth *__lowerCamelCase*: a section
    header (``\\n##``) at depth 0, otherwise an indented bullet.

    Fixes the mangled original, whose body referenced the undefined name ``i``.
    """
    i = __lowerCamelCase
    return f'''{i * " "}*''' if i else "\n##"
def snake_case__ ( old_path : str , new_path : str ) -> str:
    """Print a markdown header/bullet for each path component of *new_path* that
    differs from *old_path*, then return *new_path* as the new "current" path.

    Fixes the mangled original, whose two parameters shared one name (a
    SyntaxError) and which passed the wrong values to ``len`` and ``md_prefix``.
    NOTE(review): ``md_prefix`` is defined as ``snake_case__`` in this copy and
    must resolve for this to run.
    """
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        # Emit a line only for components that are new relative to old_path.
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(f'''{md_prefix(i )} {new_part.replace("_" , " " ).title()}''' )
    return new_path
def snake_case__ ( __lowerCamelCase : str = "." ) -> None:
    """Print a markdown index of *__lowerCamelCase*: headers per directory level and
    a link bullet per file.

    Fixes the mangled original, which lost the ``old_path`` tracking, split the
    root argument instead of each file path, and had the literal ``(unknown)``
    where the file name belongs in the link text and URL.
    NOTE(review): ``good_file_paths`` / ``print_path`` / ``md_prefix`` are all
    defined as ``snake_case__`` in this copy and must resolve for this to run.
    """
    old_path = ''''''
    for filepath in sorted(good_file_paths(__lowerCamelCase ) ):
        filepath , filename = os.path.split(filepath )
        if filepath != old_path:
            # Emit headers only for the directory components that changed.
            old_path = print_path(old_path , filepath )
        indent = (filepath.count(os.sep ) + 1) if filepath else 0
        url = f'''{filepath}/{filename}'''.replace(''' ''' , '''%20''' )
        filename = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
        print(f'''{md_prefix(indent )} [{filename}]({url})''' )
if __name__ == "__main__":
    # Emit the markdown index for the current working directory.
    # NOTE(review): ``print_directory_md`` is defined as ``snake_case__`` in this copy.
    print_directory_md(".")
| 625 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def snake_case__ ( x : float , y : float , max_step : int ) -> float:
    """Return the normalized escape step in [0, 1] of point (x, y) under the
    Mandelbrot iteration z -> z**2 + c with c = x + y*i.

    1.0 means the point did not diverge within ``max_step`` iterations (treated
    as inside the set); smaller values mean faster escape. Fixes the mangled
    original, whose three parameters shared one name (a SyntaxError) and whose
    a/b/a_new update chain had collapsed into dead assignments.
    """
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def snake_case__ ( __lowerCamelCase : float ) -> tuple:
    """Black for points inside the Mandelbrot set (escape distance == 1), white
    for everything else.

    Fixes the mangled original, whose body read the undefined name ``distance``.
    """
    distance = __lowerCamelCase
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def snake_case__ ( __lowerCamelCase : float ) -> tuple:
    """Black for points inside the Mandelbrot set (escape distance == 1); otherwise
    an HSV rainbow color whose hue encodes the escape distance.

    Fixes the mangled original, whose body read the undefined name ``distance``.
    """
    distance = __lowerCamelCase
    if distance == 1:
        return (0, 0, 0)
    else:
        # hue = distance, full saturation/value; scale [0, 1] floats to 0-255.
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def snake_case__ ( image_width : int = 800 , image_height : int = 600 , figure_center_x : float = -0.6 , figure_center_y : float = 0 , figure_width : float = 3.2 , max_step : int = 50 , use_distance_color_coding : bool = True , ):
    """Render the Mandelbrot set into a PIL image of the given size, centered on
    (figure_center_x, figure_center_y) with the given figure width.

    Fixes the mangled original, whose seven parameters all shared one name (a
    SyntaxError) and whose per-pixel locals had collapsed into dead assignments.
    NOTE(review): ``get_distance`` / ``get_color_coded_rgb`` /
    ``get_black_and_white_rgb`` are defined as ``snake_case__`` in this copy and
    must resolve for this to run.
    """
    img = Image.new('''RGB''' , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # colored version, full figure
    # NOTE(review): the render is bound to ``_lowercase`` but the final line reads
    # ``img`` — one of the two names must be fixed for this script to run; also
    # ``get_image`` is defined as ``snake_case__`` in this copy.
    _lowercase : Optional[Any] = get_image()
    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)
    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)
    # uncomment to save the image
    # img.save("mandelbrot.png")
    img.show()
| 625 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import setup for the conditional_detr subpackage: declare the import
# structure, register optional vision/torch members, and defer real imports to
# a _LazyModule unless type-checking.
# NOTE(review): machine-mangled copy — every assignment target was renamed to
# ``_lowercase`` (each shadowing the previous), so the feature-extractor /
# image-processor / modeling entries never extend the structure dict, and the
# final _LazyModule call reads ``_import_structure`` which is never defined here.
_lowercase : Tuple = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}
# Register vision-dependent members only when the vision backend is available.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : Tuple = ["ConditionalDetrFeatureExtractor"]
    _lowercase : str = ["ConditionalDetrImageProcessor"]
# Register torch-dependent modeling members only when torch is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : Tuple = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]
# Under static type checking, perform the real imports so annotations resolve.
if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy proxy that imports on access.
    import sys

    _lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 625 |
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def snake_case__ ( __lowerCamelCase : Optional[Any] ):
    """Build a VideoMAE config for the given model name, selecting the label set
    (Kinetics-400 or Something-Something-v2) for fine-tuned variants.

    NOTE(review): machine-mangled copy — results are bound to the throwaway
    ``lowerCamelCase__`` while later lines read the intended names
    (``config``, ``model_name``, ``repo_id``, ``filename``, ``idalabel``), and
    ``set_architecture_configs`` is passed the model name twice where upstream it
    receives (model_name, config). Restore from the upstream conversion script.
    """
    lowerCamelCase__ : str =VideoMAEConfig()
    set_architecture_configs(__lowerCamelCase , __lowerCamelCase )
    if "finetuned" not in model_name:
        # pre-training checkpoints do not use mean pooling / a classifier head
        lowerCamelCase__ : int =False
    if "finetuned" in model_name:
        lowerCamelCase__ : str ='''huggingface/label-files'''
        if "kinetics" in model_name:
            lowerCamelCase__ : List[Any] =400
            lowerCamelCase__ : Optional[int] ='''kinetics400-id2label.json'''
        elif "ssv2" in model_name:
            lowerCamelCase__ : Tuple =174
            lowerCamelCase__ : Optional[Any] ='''something-something-v2-id2label.json'''
        else:
            raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' )
        # download and attach the id2label / label2id mappings
        lowerCamelCase__ : Optional[int] =json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
        lowerCamelCase__ : List[Any] ={int(__lowerCamelCase ): v for k, v in idalabel.items()}
        lowerCamelCase__ : Dict =idalabel
        lowerCamelCase__ : Any ={v: k for k, v in idalabel.items()}
    return config
def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] ):
    """Set architecture hyperparameters (hidden size, intermediate size, layer and
    head counts, decoder dims) according to the model size in the model name;
    "base" keeps the config defaults.

    NOTE(review): machine-mangled copy — both parameters share one name (a
    SyntaxError; upstream they are ``model_name, config``), the body reads the
    never-bound ``model_name``, and every value is assigned to the throwaway
    ``lowerCamelCase__`` instead of the corresponding config attribute. Restore
    the ``config.<field> = <value>`` assignments from the upstream script.
    """
    if "small" in model_name:
        lowerCamelCase__ : Optional[Any] =384
        lowerCamelCase__ : List[Any] =1536
        lowerCamelCase__ : int =12
        lowerCamelCase__ : Dict =16
        lowerCamelCase__ : List[Any] =12
        lowerCamelCase__ : Optional[Any] =3
        lowerCamelCase__ : Union[str, Any] =192
        lowerCamelCase__ : str =768
    elif "large" in model_name:
        lowerCamelCase__ : Union[str, Any] =1024
        lowerCamelCase__ : str =4096
        lowerCamelCase__ : int =24
        lowerCamelCase__ : Dict =16
        lowerCamelCase__ : Union[str, Any] =12
        lowerCamelCase__ : List[Any] =8
        lowerCamelCase__ : int =512
        lowerCamelCase__ : Optional[Any] =2048
    elif "huge" in model_name:
        lowerCamelCase__ : Optional[int] =1280
        lowerCamelCase__ : Optional[int] =5120
        lowerCamelCase__ : List[Any] =32
        lowerCamelCase__ : List[Any] =16
        lowerCamelCase__ : Optional[Any] =12
        lowerCamelCase__ : Dict =8
        lowerCamelCase__ : List[Any] =640
        lowerCamelCase__ : Any =2560
    elif "base" not in model_name:
        raise ValueError('''Model name should include either "small", "base", "large", or "huge"''' )
def snake_case__ ( __lowerCamelCase : Any ):
"""simple docstring"""
if "encoder." in name:
lowerCamelCase__ : Optional[int] =name.replace('''encoder.''' , '''''' )
if "cls_token" in name:
lowerCamelCase__ : List[Any] =name.replace('''cls_token''' , '''videomae.embeddings.cls_token''' )
if "decoder_pos_embed" in name:
lowerCamelCase__ : Tuple =name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
lowerCamelCase__ : Any =name.replace('''pos_embed''' , '''videomae.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
lowerCamelCase__ : Optional[Any] =name.replace('''patch_embed.proj''' , '''videomae.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCamelCase__ : List[Any] =name.replace('''patch_embed.norm''' , '''videomae.embeddings.norm''' )
if "decoder.blocks" in name:
lowerCamelCase__ : Tuple =name.replace('''decoder.blocks''' , '''decoder.decoder_layers''' )
if "blocks" in name:
lowerCamelCase__ : Dict =name.replace('''blocks''' , '''videomae.encoder.layer''' )
if "attn.proj" in name:
lowerCamelCase__ : Union[str, Any] =name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "bias" not in name:
lowerCamelCase__ : List[str] =name.replace('''attn''' , '''attention.self''' )
if "attn" in name:
lowerCamelCase__ : Union[str, Any] =name.replace('''attn''' , '''attention.attention''' )
if "norm1" in name:
lowerCamelCase__ : Tuple =name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowerCamelCase__ : Optional[int] =name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowerCamelCase__ : List[Any] =name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowerCamelCase__ : int =name.replace('''mlp.fc2''' , '''output.dense''' )
if "decoder_embed" in name:
lowerCamelCase__ : Any =name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
if "decoder_norm" in name:
lowerCamelCase__ : Optional[Any] =name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
if "decoder_pred" in name:
lowerCamelCase__ : Any =name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
lowerCamelCase__ : str =name.replace('''norm.weight''' , '''videomae.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
lowerCamelCase__ : Optional[int] =name.replace('''norm.bias''' , '''videomae.layernorm.bias''' )
if "head" in name and "decoder" not in name:
lowerCamelCase__ : List[str] =name.replace('''head''' , '''classifier''' )
return name
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : int ):
    """Convert an original VideoMAE state dict into the HF key layout.

    NOTE(review): this block appears machine-mangled.  Both parameters share
    one name (a SyntaxError) and the body reads ``orig_state_dict``,
    ``config``, ``key_split``, ``val`` and ``dim``, none of which is ever
    bound; the slice results below are also discarded into throwaway locals
    instead of being written back into the state dict (presumably as split
    query/key/value entries).  TODO: restore from the upstream conversion
    script before relying on this function.
    """
    for key in orig_state_dict.copy().keys():
        # Pop every entry; it is presumably meant to be re-inserted under a
        # renamed key below.
        lowerCamelCase__ : Dict =orig_state_dict.pop(__lowerCamelCase )
        if key.startswith('''encoder.''' ):
            # Strip the legacy ``encoder.`` prefix.
            lowerCamelCase__ : Optional[int] =key.replace('''encoder.''' , '''''' )
        if "qkv" in key:
            # Fused qkv tensors are split into three chunks of size ``dim``.
            lowerCamelCase__ : Any =key.split('''.''' )
            if key.startswith('''decoder.blocks''' ):
                lowerCamelCase__ : Tuple =config.decoder_hidden_size
                lowerCamelCase__ : str =int(key_split[2] )
                lowerCamelCase__ : Any ='''decoder.decoder_layers.'''
                if "weight" in key:
                    # query / key / value slices, in that order.
                    lowerCamelCase__ : List[Any] =val[:dim, :]
                    lowerCamelCase__ : Any =val[dim : dim * 2, :]
                    lowerCamelCase__ : Dict =val[-dim:, :]
            else:
                lowerCamelCase__ : Optional[Any] =config.hidden_size
                lowerCamelCase__ : Optional[Any] =int(key_split[1] )
                lowerCamelCase__ : str ='''videomae.encoder.layer.'''
                if "weight" in key:
                    lowerCamelCase__ : int =val[:dim, :]
                    lowerCamelCase__ : Tuple =val[dim : dim * 2, :]
                    lowerCamelCase__ : List[Any] =val[-dim:, :]
        else:
            lowerCamelCase__ : int =val
    return orig_state_dict
def snake_case__ ( ):
    """Fetch the demo "eating spaghetti" clip from the HF Hub and return its
    frames as a plain Python list of numpy arrays (one entry per frame)."""
    clip_path = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
    frames = np.load(clip_path )
    return [frame for frame in frames]
def snake_case__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ):
    """Download an original VideoMAE checkpoint, convert it to the HF layout,
    verify its logits against hard-coded reference slices, and optionally save
    the model / image processor and push them to the hub.

    NOTE(review): this block appears machine-mangled — all four parameters
    share one name (a SyntaxError) and the body reads ``model_name``,
    ``files``, ``config``, ``outputs``, ``model_names``, ``expected_shape``,
    ``expected_slice``, ``expected_loss``, ``pytorch_dump_folder_path`` and
    ``push_to_hub``, none of which is ever bound (every assignment targets a
    throwaway local).  The intended parameters were presumably
    ``(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub)`` —
    TODO confirm against the upstream conversion script before use.
    """
    lowerCamelCase__ : str =get_videomae_config(__lowerCamelCase )
    if "finetuned" in model_name:
        # Fine-tuned checkpoints get a video-classification head.
        lowerCamelCase__ : Tuple =VideoMAEForVideoClassification(__lowerCamelCase )
    else:
        lowerCamelCase__ : int =VideoMAEForPreTraining(__lowerCamelCase )
    # download original checkpoint, hosted on Google Drive
    lowerCamelCase__ : Union[str, Any] ='''pytorch_model.bin'''
    gdown.cached_download(__lowerCamelCase , __lowerCamelCase , quiet=__lowerCamelCase )
    lowerCamelCase__ : Optional[Any] =torch.load(__lowerCamelCase , map_location='''cpu''' )
    # Original checkpoints store the weights under either "model" or "module".
    if "model" in files:
        lowerCamelCase__ : Dict =files['''model''']
    else:
        lowerCamelCase__ : str =files['''module''']
    lowerCamelCase__ : Optional[Any] =convert_state_dict(__lowerCamelCase , __lowerCamelCase )
    model.load_state_dict(__lowerCamelCase )
    model.eval()
    # verify model on basic input
    lowerCamelCase__ : Dict =VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
    lowerCamelCase__ : int =prepare_video()
    lowerCamelCase__ : Tuple =image_processor(__lowerCamelCase , return_tensors='''pt''' )
    if "finetuned" not in model_name:
        # Pre-training checkpoints additionally need the boolean mask of
        # masked patch positions.
        lowerCamelCase__ : Tuple =hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
        lowerCamelCase__ : Union[str, Any] =torch.load(__lowerCamelCase )
    lowerCamelCase__ : int =model(**__lowerCamelCase )
    lowerCamelCase__ : Dict =outputs.logits
    lowerCamelCase__ : List[str] =[
        '''videomae-small-finetuned-kinetics''',
        '''videomae-small-finetuned-ssv2''',
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        '''videomae-base-short''',
        '''videomae-base-short-finetuned-kinetics''',
        '''videomae-base''',
        '''videomae-base-finetuned-kinetics''',
        '''videomae-large''',
        '''videomae-large-finetuned-kinetics''',
        '''videomae-huge-finetuned-kinetics''',
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        '''videomae-base-short-ssv2''',
        '''videomae-base-short-finetuned-ssv2''',
        '''videomae-base-ssv2''',
        '''videomae-base-finetuned-ssv2''',
    ]
    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        lowerCamelCase__ : Union[str, Any] =torch.Size([1, 400] )
        lowerCamelCase__ : str =torch.tensor([-0.92_91, -0.40_61, -0.93_07] )
    elif model_name == "videomae-small-finetuned-ssv2":
        lowerCamelCase__ : int =torch.Size([1, 174] )
        lowerCamelCase__ : Dict =torch.tensor([0.26_71, -0.46_89, -0.82_35] )
    elif model_name == "videomae-base":
        lowerCamelCase__ : List[str] =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : Dict =torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] )
    elif model_name == "videomae-base-short":
        lowerCamelCase__ : List[Any] =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : List[str] =torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] )
        # we verified the loss both for normalized and unnormalized targets for this one
        lowerCamelCase__ : str =torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] )
    elif model_name == "videomae-large":
        lowerCamelCase__ : Union[str, Any] =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : List[Any] =torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] )
    elif model_name == "videomae-large-finetuned-kinetics":
        lowerCamelCase__ : Any =torch.Size([1, 400] )
        lowerCamelCase__ : str =torch.tensor([0.07_71, 0.00_11, -0.36_25] )
    elif model_name == "videomae-huge-finetuned-kinetics":
        lowerCamelCase__ : Any =torch.Size([1, 400] )
        lowerCamelCase__ : Optional[int] =torch.tensor([0.24_33, 0.16_32, -0.48_94] )
    elif model_name == "videomae-base-short-finetuned-kinetics":
        lowerCamelCase__ : List[str] =torch.Size([1, 400] )
        lowerCamelCase__ : Dict =torch.tensor([0.65_88, 0.09_90, -0.24_93] )
    elif model_name == "videomae-base-finetuned-kinetics":
        lowerCamelCase__ : str =torch.Size([1, 400] )
        lowerCamelCase__ : Any =torch.tensor([0.36_69, -0.06_88, -0.24_21] )
    elif model_name == "videomae-base-short-ssv2":
        lowerCamelCase__ : Tuple =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : Dict =torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] )
    elif model_name == "videomae-base-short-finetuned-ssv2":
        lowerCamelCase__ : Optional[int] =torch.Size([1, 174] )
        lowerCamelCase__ : Any =torch.tensor([-0.05_37, -0.15_39, -0.32_66] )
    elif model_name == "videomae-base-ssv2":
        lowerCamelCase__ : Dict =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : str =torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] )
    elif model_name == "videomae-base-finetuned-ssv2":
        lowerCamelCase__ : str =torch.Size([1, 174] )
        lowerCamelCase__ : int =torch.tensor([0.19_61, -0.83_37, -0.63_89] )
    else:
        raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        # Classification heads: compare the first three class logits.
        assert torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1e-4 )
    else:
        print('''Logits:''' , logits[0, :3, :3] )
        assert torch.allclose(logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 )
    print('''Logits ok!''' )
    # verify loss, if applicable
    if model_name == "videomae-base-short":
        lowerCamelCase__ : str =outputs.loss
        assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-4 )
        print('''Loss ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(__lowerCamelCase )
        model.save_pretrained(__lowerCamelCase )
    if push_to_hub:
        print('''Pushing to the hub...''' )
        model.push_to_hub(__lowerCamelCase , organization='''nielsr''' )
if __name__ == "__main__":
    # Fix: the parser and parsed args were previously bound to a throwaway
    # ``_lowercase`` name while the code below referenced ``parser``/``args``
    # (a NameError at runtime).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    # NOTE(review): ``convert_videomae_checkpoint`` is not defined under that
    # name in this (mangled) file — confirm the conversion entry point's name.
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 625 | 1 |
"""Lazy import structure for the BridgeTower model.

Follows the standard transformers ``__init__`` pattern: submodules are only
imported on first attribute access via ``_LazyModule``.

Fixes (review): ``_import_structure`` was previously bound to a throwaway
``_lowercase`` name while line-level code referenced ``_import_structure``;
optional-dependency exports were discarded instead of registered; and the
``_LazyModule`` instance was discarded instead of installed in
``sys.modules``.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Maps submodule name -> public names it exports; optional-dependency entries
# are appended below only when the dependency is importable.
_import_structure = {
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 625 |
"""Dutch national flag sort: a single-pass, in-place three-way partition of a
sequence containing only the values 0, 1 and 2 (Dijkstra's algorithm)."""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def snake_case__ ( sequence : list ):
    """Sort ``sequence`` (containing only the values 0, 1 and 2) in place in a
    single pass and return it.

    Fixes (review): the working indices and the swap targets were previously
    bound to throwaway names while the loop referenced ``low``/``mid``/``high``
    (NameError), and the color constants were never bound to the
    ``red``/``white``/``blue``/``colors`` names the code reads.

    Raises:
        ValueError: if the sequence contains a value other than 0, 1 or 2.
    """
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            # 0s go to the front; both pointers advance.
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            # 2s go to the back; mid stays to re-examine the swapped-in value.
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'''The elements inside the sequence must contains only {colors} values'''
            raise ValueError(msg )
    return sequence
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the parsed input was previously bound to throwaway ``_lowercase``
    # names while the code referenced ``user_input``/``unsorted``.
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    # NOTE(review): the sorter above is defined (mangled) as ``snake_case__``;
    # the previous ``dutch_national_flag_sort`` reference was undefined.
    print(f'{snake_case__(unsorted)}')
| 625 | 1 |
"""Lazy import structure for the MT5 model family.

MT5 reuses the T5 tokenizers; they are re-exported under MT5 names via
``extra_objects``.

Fixes (review): the tokenizer aliases, ``_import_structure`` and the
``_LazyModule`` instance were previously bound to throwaway ``_lowercase``
names while the final ``_LazyModule(...)`` call referenced
``_import_structure`` / ``MTaTokenizer`` / ``MTaTokenizerFast``.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Fall back to the dummy objects (which raise a helpful error on use) when the
# optional tokenizer backends are missing.
if is_sentencepiece_available():
    from ..ta.tokenization_ta import TaTokenizer
else:
    from ...utils.dummy_sentencepiece_objects import TaTokenizer

MTaTokenizer = TaTokenizer

if is_tokenizers_available():
    from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import TaTokenizerFast

MTaTokenizerFast = TaTokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]

if TYPE_CHECKING:
    from .configuration_mta import MTaConfig, MTaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mta import (
            MTaEncoderModel,
            MTaForConditionalGeneration,
            MTaForQuestionAnswering,
            MTaModel,
            MTaPreTrainedModel,
            MTaStack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
        module_spec=__spec__,
    )
| 625 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
# Make RNG / cuDNN behavior deterministic so the hard-coded pixel-slice
# assertions in the tests below are reproducible.
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    """Fast (tiny-model, CPU-friendly) pipeline tests for
    ``StableUnCLIPImgaImgPipeline``.

    NOTE(review): the method bodies below look machine-mangled — locals are
    bound to throwaway ``lowerCamelCase__`` names while later lines read names
    such as ``embedder_hidden_size``, ``embedder_projection_dim`` and the
    component variables collected in ``get_dummy_components``'s dict, none of
    which are ever defined.  Documented as-is; restore the real variable
    names before relying on these tests.
    """
    _a = StableUnCLIPImgaImgPipeline
    _a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    _a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    _a = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    _a = frozenset([] )

    def snake_case ( self : List[str] )-> str:
        # Build the dict of tiny model components the fast tests run with.
        lowerCamelCase__ : Dict =32
        lowerCamelCase__ : Optional[Any] =embedder_hidden_size
        # image encoding components
        lowerCamelCase__ : Dict =CLIPImageProcessor(crop_size=32, size=32 )
        torch.manual_seed(0 )
        lowerCamelCase__ : List[Any] =CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=lowerCamelCase, projection_dim=lowerCamelCase, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) )
        # regular denoising components
        torch.manual_seed(0 )
        lowerCamelCase__ : Optional[int] =StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
        lowerCamelCase__ : Dict =DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
        torch.manual_seed(0 )
        lowerCamelCase__ : Optional[int] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        torch.manual_seed(0 )
        lowerCamelCase__ : Tuple =CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=lowerCamelCase, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
        torch.manual_seed(0 )
        lowerCamelCase__ : Dict =UNetaDConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D'''), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='''projection''', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=lowerCamelCase, layers_per_block=1, upcast_attention=lowerCamelCase, use_linear_projection=lowerCamelCase, )
        torch.manual_seed(0 )
        lowerCamelCase__ : Union[str, Any] =DDIMScheduler(
            beta_schedule='''scaled_linear''', beta_start=0.00_085, beta_end=0.012, prediction_type='''v_prediction''', set_alpha_to_one=lowerCamelCase, steps_offset=1, )
        torch.manual_seed(0 )
        lowerCamelCase__ : Optional[int] =AutoencoderKL()
        lowerCamelCase__ : int ={
            # image encoding components
            '''feature_extractor''': feature_extractor,
            '''image_encoder''': image_encoder.eval(),
            # image noising components
            '''image_normalizer''': image_normalizer.eval(),
            '''image_noising_scheduler''': image_noising_scheduler,
            # regular denoising components
            '''tokenizer''': tokenizer,
            '''text_encoder''': text_encoder.eval(),
            '''unet''': unet.eval(),
            '''scheduler''': scheduler,
            '''vae''': vae.eval(),
        }
        return components

    def snake_case ( self : str, lowerCamelCase : Dict, lowerCamelCase : Any=0, lowerCamelCase : str=True )-> List[str]:
        # Deterministic generator + random 32x32 input image; the image is
        # converted to PIL when ``pil_image`` is truthy.
        if str(lowerCamelCase ).startswith('''mps''' ):
            lowerCamelCase__ : List[Any] =torch.manual_seed(lowerCamelCase )
        else:
            lowerCamelCase__ : Any =torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
        lowerCamelCase__ : Dict =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
        if pil_image:
            lowerCamelCase__ : int =input_image * 0.5 + 0.5
            lowerCamelCase__ : Dict =input_image.clamp(0, 1 )
            lowerCamelCase__ : List[str] =input_image.cpu().permute(0, 2, 3, 1 ).float().numpy()
            lowerCamelCase__ : Dict =DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def snake_case ( self : List[str] )-> Optional[Any]:
        # End-to-end smoke test against a hard-coded output slice.
        lowerCamelCase__ : Dict ='''cpu'''  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase__ : str =self.get_dummy_components()
        lowerCamelCase__ : int =StableUnCLIPImgaImgPipeline(**lowerCamelCase )
        lowerCamelCase__ : Any =sd_pipe.to(lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
        lowerCamelCase__ : Dict =self.get_dummy_inputs(lowerCamelCase )
        inputs.update({'''image_embeds''': None} )
        lowerCamelCase__ : Any =sd_pipe(**lowerCamelCase ).images
        lowerCamelCase__ : List[Any] =image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        lowerCamelCase__ : Union[str, Any] =np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def snake_case ( self : int )-> Tuple:
        # Attention slicing may differ slightly on cpu/mps, so relax there.
        lowerCamelCase__ : Tuple =torch_device in ['''cpu''', '''mps''']
        self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )

    def snake_case ( self : int )-> Optional[Any]:
        lowerCamelCase__ : List[Any] =torch_device in ['''cpu''', '''mps''']
        self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
    def snake_case ( self : List[str] )-> List[str]:
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow GPU integration tests for StableUnCLIP img2img: compare full
    pipeline outputs against reference images hosted on the Hub, and check
    peak memory with cpu-offload + attention slicing enabled.

    NOTE(review): as in the fast-test class above, locals are bound to
    throwaway ``lowerCamelCase__`` names while the code reads ``pipe``,
    ``output``, ``image``, ``mem_bytes`` etc. — apparently machine-mangled.
    """

    def snake_case ( self : List[Any] )-> Dict:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case ( self : Optional[int] )-> int:
        # 2-1-l checkpoint vs. reference output.
        lowerCamelCase__ : Tuple =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        lowerCamelCase__ : Optional[int] =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
        lowerCamelCase__ : Optional[Any] =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-l-img2img''', torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase__ : int =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase__ : Any =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' )
        lowerCamelCase__ : List[Any] =output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )

    def snake_case ( self : Optional[int] )-> Tuple:
        # 2-1-h checkpoint vs. reference output.
        lowerCamelCase__ : Any =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        lowerCamelCase__ : str =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
        lowerCamelCase__ : Optional[int] =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase__ : str =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase__ : Tuple =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' )
        lowerCamelCase__ : Tuple =output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )

    def snake_case ( self : Optional[int] )-> List[str]:
        # Peak-VRAM regression check with the memory savings enabled.
        lowerCamelCase__ : int =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        lowerCamelCase__ : Any =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
        lowerCamelCase__ : Optional[Any] =pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase__ : List[Any] =pipe(
            lowerCamelCase, '''anime turtle''', num_inference_steps=2, output_type='''np''', )
        lowerCamelCase__ : Optional[int] =torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 625 | 1 |
"""simple docstring"""
def snake_case__ ( n_element : int ):
    """Return the first ``n_element`` Hamming numbers (numbers of the form
    2^i * 3^j * 5^k) in ascending order, starting from 1.

    Fixes (review): the working names (``n_element``, ``my_error``,
    ``hamming_list``, ``i``/``j``/``k``, ``index``) were previously bound to
    throwaway locals while the loop referenced them (NameError).

    Raises:
        ValueError: if ``n_element`` is smaller than 1.
    """
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError('''a should be a positive number''' )
        raise my_error
    hamming_list = [1]
    # Classic three-pointer merge: each pointer tracks the smallest multiple
    # of 2, 3 and 5 (respectively) that has not yet been appended.
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list
if __name__ == "__main__":
    # Fix: ``n`` and ``hamming_numbers`` were previously bound to throwaway
    # ``_lowercase`` names while the code below referenced them.
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    # NOTE(review): the generator above is defined (mangled) as
    # ``snake_case__``; the previous ``hamming`` reference was undefined.
    hamming_numbers = snake_case__(int(n))
    print("-----------------------------------------------------")
    print(f'The list with nth numbers is: {hamming_numbers}')
    print("-----------------------------------------------------")
| 625 |
"""simple docstring"""
def snake_case__ ( n : int = 4000000 ):
    """Project Euler #2: return the sum of the even-valued Fibonacci numbers
    that do not exceed ``n``.

    Fixes (review): the accumulator and the Fibonacci pair were previously
    bound to throwaway locals while the loop referenced ``even_fibs``/``a``/
    ``b``/``n``, and ``append`` was called with the bound parameter instead of
    the current Fibonacci term.

    Args:
        n: inclusive upper bound for Fibonacci values (default four million).

    Returns:
        The sum of all even Fibonacci numbers <= ``n``.
    """
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a, b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
    # NOTE(review): the solver above is defined (mangled) as ``snake_case__``;
    # the previous ``solution`` reference was undefined.
    print(f'{snake_case__() = }')
| 625 | 1 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def snake_case__ ( __lowerCamelCase : list[float] ):
    """Rectified linear unit: clamp every element of the input at zero from
    below and return the result as a numpy array.

    >>> snake_case__([-1, 0, 5]).tolist()
    [0, 0, 5]
    """
    vector = __lowerCamelCase
    # np.maximum broadcasts the scalar threshold across the whole input.
    return np.maximum(vector , 0 )
if __name__ == "__main__":
    # NOTE(review): the ReLU above is defined (mangled) as ``snake_case__``;
    # the previous ``relu`` reference was undefined.
    print(np.array(snake_case__([-1, 0, 5])))  # --> [0, 0, 5]
| 625 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class __SCREAMING_SNAKE_CASE :
    """Builder of a tiny BlenderbotSmall config and encoder/decoder inputs for
    the TF model tests below.

    NOTE(review): this block appears machine-mangled — ``__init__`` repeats
    the parameter name ``lowerCamelCase`` (a SyntaxError) and the method
    bodies read names (``input_ids``, ``eos_tensor``, ``config``, ``outputs``
    etc.) that are only ever bound to throwaway ``lowerCamelCase__`` locals.
    Documented as-is; restore the real names before use.
    """
    _a = BlenderbotSmallConfig
    _a = {}
    _a = 'gelu'

    def __init__( self : Union[str, Any], lowerCamelCase : List[str], lowerCamelCase : Dict=13, lowerCamelCase : Optional[Any]=7, lowerCamelCase : Optional[int]=True, lowerCamelCase : int=False, lowerCamelCase : Union[str, Any]=99, lowerCamelCase : str=32, lowerCamelCase : List[Any]=2, lowerCamelCase : Optional[int]=4, lowerCamelCase : Union[str, Any]=37, lowerCamelCase : str=0.1, lowerCamelCase : Optional[int]=0.1, lowerCamelCase : Optional[Any]=20, lowerCamelCase : int=2, lowerCamelCase : Any=1, lowerCamelCase : Optional[Any]=0, )-> List[str]:
        # Hyper-parameters of the tiny test model.
        lowerCamelCase__ : Any =parent
        lowerCamelCase__ : Dict =batch_size
        lowerCamelCase__ : Optional[int] =seq_length
        lowerCamelCase__ : Tuple =is_training
        lowerCamelCase__ : Dict =use_labels
        lowerCamelCase__ : List[Any] =vocab_size
        lowerCamelCase__ : str =hidden_size
        lowerCamelCase__ : str =num_hidden_layers
        lowerCamelCase__ : Union[str, Any] =num_attention_heads
        lowerCamelCase__ : Any =intermediate_size
        lowerCamelCase__ : Dict =hidden_dropout_prob
        lowerCamelCase__ : List[Any] =attention_probs_dropout_prob
        lowerCamelCase__ : str =max_position_embeddings
        lowerCamelCase__ : Optional[int] =eos_token_id
        lowerCamelCase__ : str =pad_token_id
        lowerCamelCase__ : Union[str, Any] =bos_token_id

    def snake_case ( self : Any )-> Any:
        # Random input ids terminated with EOS + a tiny seq2seq config.
        lowerCamelCase__ : Any =ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
        lowerCamelCase__ : Tuple =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
        lowerCamelCase__ : Any =tf.concat([input_ids, eos_tensor], axis=1 )
        lowerCamelCase__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        lowerCamelCase__ : int =self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        lowerCamelCase__ : Optional[int] =prepare_blenderbot_small_inputs_dict(lowerCamelCase, lowerCamelCase, lowerCamelCase )
        return config, inputs_dict

    def snake_case ( self : Any, lowerCamelCase : str, lowerCamelCase : Any )-> Optional[Any]:
        # Check that cached (past_key_values) decoding matches the uncached
        # forward pass on a random slice of the logits.
        lowerCamelCase__ : Union[str, Any] =TFBlenderbotSmallModel(config=lowerCamelCase ).get_decoder()
        lowerCamelCase__ : List[Any] =inputs_dict['''input_ids''']
        lowerCamelCase__ : Optional[int] =input_ids[:1, :]
        lowerCamelCase__ : str =inputs_dict['''attention_mask'''][:1, :]
        lowerCamelCase__ : Union[str, Any] =inputs_dict['''head_mask''']
        lowerCamelCase__ : Optional[Any] =1
        # first forward pass
        lowerCamelCase__ : Dict =model(lowerCamelCase, attention_mask=lowerCamelCase, head_mask=lowerCamelCase, use_cache=lowerCamelCase )
        lowerCamelCase__ , lowerCamelCase__ : List[str] =outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        lowerCamelCase__ : Union[str, Any] =ids_tensor((self.batch_size, 3), config.vocab_size )
        lowerCamelCase__ : Tuple =tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
        # append to next input_ids and
        lowerCamelCase__ : List[str] =tf.concat([input_ids, next_tokens], axis=-1 )
        lowerCamelCase__ : str =tf.concat([attention_mask, next_attn_mask], axis=-1 )
        lowerCamelCase__ : Optional[int] =model(lowerCamelCase, attention_mask=lowerCamelCase )[0]
        lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase, attention_mask=lowerCamelCase, past_key_values=lowerCamelCase )[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
        # select random slice
        lowerCamelCase__ : Tuple =int(ids_tensor((1,), output_from_past.shape[-1] ) )
        lowerCamelCase__ : int =output_from_no_past[:, -3:, random_slice_idx]
        lowerCamelCase__ : List[str] =output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(lowerCamelCase, lowerCamelCase, rtol=1E-3 )
def snake_case__ ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    """Build the full keyword-argument dict for a BlenderbotSmall forward pass,
    synthesizing any mask that was not supplied.

    Fixes vs. the previous revision: all eight parameters were mangled to one
    repeated name (`__lowerCamelCase`, a SyntaxError) and the dtype `tf.inta`
    does not exist (restored to ``tf.int8``).
    """
    if attention_mask is None:
        # attend everywhere except padding positions
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # always attend to the first decoder token, then mask padding
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        # keep every encoder attention head active
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    """Common model/pipeline test suite for TFBlenderbotSmall."""

    # NOTE(review): every class attribute below was mangled to `_a`, so later
    # assignments shadow earlier ones; upstream these are distinct names
    # (all_model_classes, all_generative_model_classes, pipeline_model_mapping,
    # is_encoder_decoder, test_pruning, test_onnx) — confirm before use.
    _a = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    _a = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    _a = (
        {
            'conversational': TFBlenderbotSmallForConditionalGeneration,
            'feature-extraction': TFBlenderbotSmallModel,
            'summarization': TFBlenderbotSmallForConditionalGeneration,
            'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
            'translation': TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    _a = True
    _a = False
    _a = False

    def snake_case ( self : Any )-> str:
        # Build the shared model tester and config tester used by the cases below.
        # NOTE(review): `lowerCamelCase` is undefined here — the config_class
        # argument was mangled (upstream passes BlenderbotSmallConfig).
        lowerCamelCase__ : Tuple =TFBlenderbotSmallModelTester(self )
        lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase )

    def snake_case ( self : Any )-> Optional[int]:
        # Run the standard config sanity checks.
        self.config_tester.run_common_tests()

    def snake_case ( self : int )-> str:
        # Exercise the decoder with cached past_key_values on large inputs.
        lowerCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase )
@require_tokenizers
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration test: generate a reply with blenderbot_small-90M and
    compare against known-good reference responses."""

    # NOTE(review): both attributes were mangled to `_a`; upstream they are
    # src_text and model_name (the second assignment shadows the first).
    _a = [
        'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
        ' i\'m going to throw up.\nand why is that?'
    ]
    _a = 'facebook/blenderbot_small-90M'

    @cached_property
    def snake_case ( self : Any )-> List[Any]:
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )

    @cached_property
    def snake_case ( self : int )-> List[Any]:
        # Lazily load the seq2seq model under test.
        lowerCamelCase__ : str =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    @slow
    def snake_case ( self : Tuple )-> int:
        # End-to-end smoke test: tokenize, beam-search generate, decode, and
        # accept any of the known-good reference outputs.
        # NOTE(review): `use_cache=lowerCamelCase` / `skip_special_tokens=lowerCamelCase`
        # reference an undefined name — upstream passes True for both.
        lowerCamelCase__ : Dict =self.tokenizer(self.src_text, return_tensors='''tf''' )
        lowerCamelCase__ : Any =self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=lowerCamelCase, )
        lowerCamelCase__ : Any =self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=lowerCamelCase )[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 625 | 1 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def snake_case__ ( a : int , b : int , c : int ):
    """Solve the quadratic equation a*x**2 + b*x + c = 0.

    Returns:
        A 2-tuple with both roots. Each root is returned as a plain float
        when it is real, otherwise as a complex number.

    Raises:
        ValueError: if ``a`` is zero (the equation is not quadratic).

    Fixes vs. the previous revision: the three parameters shared one name
    (a SyntaxError) and both roots were bound to the same variable, so the
    second root silently overwrote the first and was returned twice.
    """
    if a == 0:
        raise ValueError('''Coefficient \'a\' must not be zero.''' )
    delta = b * b - 4 * a * c
    # cmath.sqrt handles a negative discriminant by returning a complex value.
    root_1 = (-b + sqrt(delta )) / (2 * a)
    root_2 = (-b - sqrt(delta )) / (2 * a)
    # Collapse purely-real complex results to floats for nicer output.
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def snake_case__ ( ):
    """Demo entry point: solve 5*x**2 + 6*x + 1 = 0 and print both roots."""
    # NOTE(review): this body is broken as written — `quadratic_roots` is
    # undefined (the solver above is named `snake_case__`, and this very def
    # shadows it), the tuple is unpacked into one repeated placeholder, and the
    # f-string reads the undefined `solutiona` twice. Upstream this was
    # `solution1, solution2 = quadratic_roots(a=5, b=6, c=1)`.
    lowerCamelCase__ , lowerCamelCase__ : int =quadratic_roots(a=5 , b=6 , c=1 )
    print(f'''The solutions are: {solutiona} and {solutiona}''' )


if __name__ == "__main__":
    # NOTE(review): `main` is undefined in this module — the entry point above
    # was presumably named `main` before mangling.
    main()
| 625 |
"""simple docstring"""
def snake_case__ ( graph : list[list[int]] , next_ver : int , curr_ind : int , path : list[int] ):
    """Decide whether vertex ``next_ver`` may extend the partial Hamiltonian
    ``path`` at position ``curr_ind``.

    Fixes vs. the previous revision: all four parameters shared one mangled
    name (a SyntaxError) while the body read the original names.

    Returns True iff an edge exists from the current path tail to ``next_ver``
    and ``next_ver`` is not already on the path.
    """
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path )
def snake_case__ ( graph : list[list[int]] , path : list[int] , curr_ind : int ):
    """Backtracking helper: try to complete ``path`` (filled up to
    ``curr_ind - 1``) into a Hamiltonian cycle over ``graph``.

    Mutates ``path`` in place; returns True when a cycle was found.

    Fixes vs. the previous revision: the parameters shared one mangled name
    (a SyntaxError) and the two ``path[curr_ind] = ...`` mutations had been
    collapsed into no-op placeholder assignments, so the path was never built.
    Relies on the sibling helpers ``valid_connection`` / ``util_hamilton_cycle``
    defined in this module.
    """
    # Base Case
    if curr_ind == len(graph ):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0 , len(graph ) ):
        if valid_connection(graph , next_ver , curr_ind , path ):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph , path , curr_ind + 1 ):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def snake_case__ ( graph : list[list[int]] , start_index : int = 0 ):
    """Find a Hamiltonian cycle in ``graph`` (adjacency matrix of 0/1 edges).

    Returns the cycle as a list of vertex indices starting and ending at
    ``start_index``, or an empty list when no cycle exists.

    Fixes vs. the previous revision: the parameters shared one mangled name
    and the ``path[0] = path[-1] = start_index`` initialization had been
    reduced to a placeholder assignment, so the path never anchored at the
    start vertex. Relies on the sibling ``util_hamilton_cycle`` helper.
    """
    # one slot per vertex plus the closing return to the start vertex
    path = [-1] * (len(graph ) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph , path , 1 ) else []
| 625 | 1 |
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def snake_case__ ( old_name , num_meta4D_last_stage ):
    """Map one key of a raw EfficientFormer state dict to its HF-Transformers
    name.

    ``num_meta4D_last_stage`` is the count of meta-4D blocks in the last stage;
    block indices at or beyond it belong to the meta-3D ("last_stage") layers.

    Fixes vs. the previous revision: the two parameters shared one mangled
    name (a SyntaxError) and every assignment target had been collapsed to a
    placeholder, so the function never produced a renamed key.
    """
    new_name = old_name

    # Stem: patch_embed.{0,1,3,4}.* -> conv/batchnorm pairs.
    if "patch_embed" in old_name:
        _, layer, _ = old_name.split(".")
        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    # Numbered "network.X.Y..." blocks: meta-4D stages vs. the final meta-3D stage.
    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_pattern = r"\b\d{2}\b"
        if bool(re.search(two_digit_pattern, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                # meta-3D blocks restart their numbering after the 4D blocks
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    # Remaining component renames (only outside the already-handled layernorms).
    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")

    # Heads keep no prefix; everything else gets the efficientformer prefix.
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name
    return new_name
def snake_case__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] ):
    """Rewrite every key of a raw EfficientFormer state dict into HF naming."""
    # NOTE(review): this block was mangled. The two parameters share one name
    # (duplicate argument = SyntaxError), the popped value lands in a throwaway
    # placeholder, and the re-insertion under the renamed key — upstream
    # `checkpoint[rename_key(key, num_meta4D_last_stage)] = val` — was lost,
    # so as written nothing is ever renamed. Restore from the upstream script.
    for key in checkpoint.copy().keys():
        lowerCamelCase__ : List[Any] =checkpoint.pop(__lowerCamelCase )
        lowerCamelCase__ : Dict =val
    return checkpoint
def snake_case__ ( ):
    """Download the standard COCO validation image (two cats) used as the
    smoke-test input for the conversion sanity checks.

    Fix vs. the previous revision: the ``stream=`` keyword referenced the
    undefined name ``__lowerCamelCase``; restored to ``stream=True`` so PIL
    can read directly from the response body.
    """
    image_url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    response = requests.get(image_url , stream=True )
    return Image.open(response.raw )
def snake_case__ ( __lowerCamelCase : Path , __lowerCamelCase : Path , __lowerCamelCase : Path , __lowerCamelCase : bool ):
    """Convert an EfficientFormer torch checkpoint to the HF format, verify its
    logits against known-good values, save model and image processor, and
    optionally push both to the Hub."""
    # NOTE(review): names throughout this body were mangled — the four
    # parameters all share one name (a SyntaxError) and assignment targets
    # became placeholders while later lines read the original locals
    # (checkpoint_path, config, model, processor, logits, ...). Upstream the
    # parameters are (checkpoint_path, efficientformer_config_file,
    # pytorch_dump_path, push_to_hub) — confirm before relying on this code.
    lowerCamelCase__ : List[str] =torch.load(__lowerCamelCase , map_location='''cpu''' )['''model''']
    lowerCamelCase__ : Optional[Any] =EfficientFormerConfig.from_json_file(__lowerCamelCase )
    lowerCamelCase__ : Dict =EfficientFormerForImageClassificationWithTeacher(__lowerCamelCase )
    # model size id, e.g. "efficientformer_l1" from the checkpoint filename
    lowerCamelCase__ : int ='''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
    # meta-4D block count of the last stage; drives the key renaming
    # (attribute looks digit-mangled — upstream is `num_meta3d_blocks`; verify)
    lowerCamelCase__ : Union[str, Any] =config.depths[-1] - config.num_metaad_blocks + 1
    lowerCamelCase__ : List[str] =convert_torch_checkpoint(__lowerCamelCase , __lowerCamelCase )
    model.load_state_dict(__lowerCamelCase )
    model.eval()
    lowerCamelCase__ : str ={
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    # prepare image
    lowerCamelCase__ : Optional[int] =prepare_img()
    lowerCamelCase__ : Optional[int] =256
    lowerCamelCase__ : Optional[int] =224
    lowerCamelCase__ : int =EfficientFormerImageProcessor(
        size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
    lowerCamelCase__ : Tuple =processor(images=__lowerCamelCase , return_tensors='''pt''' ).pixel_values
    # original processing pipeline
    lowerCamelCase__ : int =Compose(
        [
            Resize(__lowerCamelCase , interpolation=pillow_resamplings['''bicubic'''] ),
            CenterCrop(__lowerCamelCase ),
            ToTensor(),
            Normalize(__lowerCamelCase , __lowerCamelCase ),
        ] )
    lowerCamelCase__ : List[Any] =image_transforms(__lowerCamelCase ).unsqueeze(0 )
    # the HF processor must agree with the reference torchvision pipeline
    assert torch.allclose(__lowerCamelCase , __lowerCamelCase )
    lowerCamelCase__ : Union[str, Any] =model(__lowerCamelCase )
    lowerCamelCase__ : Tuple =outputs.logits
    lowerCamelCase__ : Tuple =(1, 1000)
    # spot-check the first ten logits per model size
    if "l1" in model_name:
        lowerCamelCase__ : Optional[Any] =torch.Tensor(
            [-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28] )
        assert torch.allclose(logits[0, :10] , __lowerCamelCase , atol=1e-3 )
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        lowerCamelCase__ : List[str] =torch.Tensor(
            [-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27] )
        assert torch.allclose(logits[0, :10] , __lowerCamelCase , atol=1e-3 )
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        lowerCamelCase__ : str =torch.Tensor(
            [-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
    # Save Checkpoints
    Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
    model.save_pretrained(__lowerCamelCase )
    print(f'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' )
    processor.save_pretrained(__lowerCamelCase )
    print(f'''Processor successfuly saved at {pytorch_dump_path}''' )
    if push_to_hub:
        print('''Pushing model to the hub...''' )
        model.push_to_hub(
            repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=__lowerCamelCase , )
        processor.push_to_hub(
            repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=__lowerCamelCase , )
if __name__ == "__main__":
    # CLI entry point for the EfficientFormer conversion.
    # NOTE(review): the variable names were mangled to `_lowercase` while the
    # body reads `parser` and `args`, and `convert_efficientformer_checkpoint`
    # is undefined (the converter above is named `snake_case__`). Restore
    # `parser = argparse.ArgumentParser()` / `args = parser.parse_args()` and
    # the original function name to make this runnable.
    _lowercase : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path",
        default=None,
        type=str,
        required=True,
        help="Path to EfficientFormer pytorch checkpoint.",
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for EfficientFormer model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    # --push_to_hub / --no-push_to_hub toggle the same flag; default is True.
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    parser.set_defaults(push_to_hub=True)
    _lowercase : List[str] = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
| 625 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# Shared SentencePiece fixture used to build throwaway tokenizers in the tests.
_lowercase : List[str] = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

# NOTE(review): these two constants were mangled to the same name `_lowercase`
# (the second shadows the first); upstream they are EN_CODE = 250004 and
# RO_CODE = 250020, which later tests still reference by those names.
_lowercase : List[str] = 2_5_0_0_0_4
_lowercase : Optional[Any] = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    """Unit tests for the slow (SentencePiece) and fast MBart tokenizers."""

    # NOTE(review): the class attributes were mangled to `_a`; upstream they
    # are tokenizer_class, rust_tokenizer_class, test_rust_tokenizer and
    # test_sentencepiece (later assignments shadow earlier ones here).
    _a = MBartTokenizer
    _a = MBartTokenizerFast
    _a = True
    _a = True

    def snake_case ( self : Tuple )-> Union[str, Any]:
        # Build a slow tokenizer from the SentencePiece fixture and save it into
        # the temp dir shared by the tester mixin.
        # NOTE(review): `lowerCamelCase` is undefined — upstream passes
        # SAMPLE_VOCAB and keep_accents=True.
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCamelCase__ : Union[str, Any] =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )
        tokenizer.save_pretrained(self.tmpdirname )

    def snake_case ( self : Dict )-> Union[str, Any]:
        # Full round-trip: tokenize -> ids -> tokens, checking the expected
        # SentencePiece pieces, the fairseq id offset, and <unk> handling.
        lowerCamelCase__ : Any =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )
        lowerCamelCase__ : List[Any] =tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(lowerCamelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowerCamelCase ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        lowerCamelCase__ : str =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            lowerCamelCase, [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ], )
        lowerCamelCase__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(lowerCamelCase )
        self.assertListEqual(
            lowerCamelCase, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ], )
        lowerCamelCase__ : str =tokenizer.convert_ids_to_tokens(lowerCamelCase )
        self.assertListEqual(
            lowerCamelCase, [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ], )

    def snake_case ( self : Tuple )-> List[Any]:
        # For each fast tokenizer, check save_pretrained round-trips through
        # (a) the default format, (b) legacy_format=True, (c) legacy_format=False,
        # and that the special-token map survives reloading in every case.
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        lowerCamelCase__ : int =(self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCamelCase__ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
                lowerCamelCase__ : str =self.tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
                lowerCamelCase__ : List[str] =tempfile.mkdtemp()
                lowerCamelCase__ : Union[str, Any] =tokenizer_r.save_pretrained(lowerCamelCase )
                lowerCamelCase__ : Optional[int] =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                lowerCamelCase__ : List[str] =tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Any =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : Dict =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(lowerCamelCase )
                # Save tokenizer rust, legacy_format=True
                lowerCamelCase__ : Dict =tempfile.mkdtemp()
                lowerCamelCase__ : List[str] =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
                lowerCamelCase__ : Tuple =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it save with the same files
                self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Optional[int] =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : Any =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                shutil.rmtree(lowerCamelCase )
                # Save tokenizer rust, legacy_format=False
                lowerCamelCase__ : Optional[int] =tempfile.mkdtemp()
                lowerCamelCase__ : int =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
                lowerCamelCase__ : Dict =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Dict =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : int =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                shutil.rmtree(lowerCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Integration tests for the pretrained facebook/mbart-large-en-ro
    tokenizer against known fairseq reference ids."""

    # NOTE(review): attribute names were mangled to `_a`; upstream they are
    # checkpoint_name, src_text, tgt_text, expected_src_tokens. The token list
    # also references EN_CODE, which was mangled to `_lowercase` above.
    _a = 'facebook/mbart-large-en-ro'
    _a = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    _a = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    _a = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]

    @classmethod
    def snake_case ( cls : List[Any] )-> Optional[int]:
        # Load the en_XX -> ro_RO tokenizer once for the whole class.
        lowerCamelCase__ : MBartTokenizer =MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='''en_XX''', tgt_lang='''ro_RO''' )
        lowerCamelCase__ : Optional[int] =1
        return cls

    def snake_case ( self : Optional[Any] )-> List[str]:
        # Language-code tokens live after the vocabulary at fixed fairseq ids.
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''], 25_0001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''], 25_0004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''], 25_0020 )

    def snake_case ( self : Optional[int] )-> List[Any]:
        # Encoding the source text must reproduce the reference token ids.
        lowerCamelCase__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, lowerCamelCase )

    def snake_case ( self : Optional[Any] )-> str:
        # Decoding with skip_special_tokens must drop the language code and EOS.
        self.assertIn(lowerCamelCase, self.tokenizer.all_special_ids )
        lowerCamelCase__ : Optional[int] =[RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
        lowerCamelCase__ : Any =self.tokenizer.decode(lowerCamelCase, skip_special_tokens=lowerCamelCase )
        lowerCamelCase__ : str =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCamelCase )
        self.assertEqual(lowerCamelCase, lowerCamelCase )
        self.assertNotIn(self.tokenizer.eos_token, lowerCamelCase )

    def snake_case ( self : Tuple )-> int:
        # Truncation must keep EOS (id 2) and the language code as the suffix.
        lowerCamelCase__ : Optional[int] =['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0], lowerCamelCase )
        lowerCamelCase__ : Dict =10
        lowerCamelCase__ : Optional[int] =self.tokenizer(lowerCamelCase, max_length=lowerCamelCase, truncation=lowerCamelCase ).input_ids[0]
        self.assertEqual(ids[-2], 2 )
        self.assertEqual(ids[-1], lowerCamelCase )
        self.assertEqual(len(lowerCamelCase ), lowerCamelCase )

    def snake_case ( self : int )-> Any:
        # <mask> and ar_AR resolve to their reserved fairseq ids.
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ), [25_0026, 25_0001] )

    def snake_case ( self : Tuple )-> Optional[Any]:
        # Save/reload must preserve the fairseq token-id table.
        lowerCamelCase__ : int =tempfile.mkdtemp()
        lowerCamelCase__ : Optional[int] =self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =MBartTokenizer.from_pretrained(lowerCamelCase )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowerCamelCase )

    @require_torch
    def snake_case ( self : Optional[Any] )-> Tuple:
        # Batch layout matches fairseq: [..., EOS, lang] on inputs and the
        # language code first on shifted decoder inputs.
        lowerCamelCase__ : Optional[Any] =self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, return_tensors='''pt''' )
        lowerCamelCase__ : Dict =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def snake_case ( self : Optional[Any] )-> Any:
        # Padding/truncation to the reference length keeps shapes and the
        # special-token suffix consistent.
        lowerCamelCase__ : str =self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=len(self.expected_src_tokens ), return_tensors='''pt''', )
        lowerCamelCase__ : List[Any] =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )
        self.assertIsInstance(lowerCamelCase, lowerCamelCase )
        self.assertEqual((2, 14), batch.input_ids.shape )
        self.assertEqual((2, 14), batch.attention_mask.shape )
        lowerCamelCase__ : Any =batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, lowerCamelCase )
        self.assertEqual(2, batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [] )
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE] )

    def snake_case ( self : List[Any] )-> Dict:
        # Source and target can be truncated to different max lengths.
        lowerCamelCase__ : Any =self.tokenizer(self.src_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=3, return_tensors='''pt''' )
        lowerCamelCase__ : Tuple =self.tokenizer(
            text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=10, return_tensors='''pt''' )
        lowerCamelCase__ : Union[str, Any] =targets['''input_ids''']
        lowerCamelCase__ : List[Any] =shift_tokens_right(lowerCamelCase, self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1], 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1], 10 )

    @require_torch
    def snake_case ( self : Optional[int] )-> List[Any]:
        # The translation-input helper must set forced_bos_token_id to the
        # target language code.
        lowerCamelCase__ : str =self.tokenizer._build_translation_inputs(
            '''A test''', return_tensors='''pt''', src_lang='''en_XX''', tgt_lang='''ar_AR''' )
        self.assertEqual(
            nested_simplify(lowerCamelCase ), {
                # A, test, EOS, en_XX
                '''input_ids''': [[62, 3034, 2, 25_0004]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 25_0001,
            }, )
| 625 | 1 |
"""simple docstring"""
def snake_case__ ( inp : str , table : list[int] ):
    """Permute the bit string ``inp`` according to ``table``.

    ``table`` lists 1-based positions of ``inp``; the output is the selected
    characters in table order (S-DES permutation/expansion step).

    Fix vs. the previous revision: the two parameters shared one mangled name
    (a SyntaxError) while the body read ``inp`` and ``table``.
    """
    res = ''''''
    for position in table:
        # table entries are 1-based
        res += inp[position - 1]
    return res
def snake_case__ ( data : str ):
    """Rotate the string one position to the left (S-DES key-schedule shift).

    Fix vs. the previous revision: the parameter was mangled while the body
    read ``data``.
    """
    return data[1:] + data[0]
def snake_case__ ( a : str , b : str ):
    """Bitwise XOR of two equal-length bit strings, returned as a bit string.

    Fix vs. the previous revision: the two parameters shared one mangled name
    (a SyntaxError) while the body read ``a`` and ``b``.
    """
    res = ''''''
    for i in range(len(a ) ):
        # equal bits XOR to 0, differing bits to 1
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def snake_case__ ( s : list[list[int]] , data : str ):
    """Apply an S-DES S-box lookup to a 4-bit string.

    The outer bits (first + last) select the row and the middle two bits the
    column of ``s``; the entry is returned as a bit string (no zero padding —
    the caller pads to width 2).

    Fix vs. the previous revision: the two parameters shared one mangled name
    (a SyntaxError) while the body read ``s`` and ``data``.
    """
    row = int('''0b''' + data[0] + data[-1] , 2 )
    col = int('''0b''' + data[1:3] , 2 )
    return bin(s[row][col] )[2:]
def snake_case__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] ):
    """One S-DES Feistel round: split the 8-bit message, expand/permute the
    right half, XOR with the round key, substitute both halves through the
    S-boxes, permute with P4 and XOR into the left half."""
    # NOTE(review): this block was mangled — the five parameters share one
    # name (a SyntaxError; upstream signature is (expansion, s0, s1, key,
    # message)), assignment targets became placeholders while later lines read
    # the original locals (temp, l, r, right), and several call arguments
    # (including the P4 table for the second apply_table) were replaced by the
    # undefined `__lowerCamelCase`. Restore from the reference implementation.
    lowerCamelCase__ : str =message[:4]
    lowerCamelCase__ : List[Any] =message[4:]
    lowerCamelCase__ : List[Any] =apply_table(__lowerCamelCase , __lowerCamelCase )
    lowerCamelCase__ : Dict =xor(__lowerCamelCase , __lowerCamelCase )
    lowerCamelCase__ : Tuple =apply_sbox(__lowerCamelCase , temp[:4] )  # noqa: E741
    lowerCamelCase__ : Dict =apply_sbox(__lowerCamelCase , temp[4:] )
    # pad each S-box output to 2 bits before the P4 permutation
    lowerCamelCase__ : List[str] ='''0''' * (2 - len(__lowerCamelCase )) + l  # noqa: E741
    lowerCamelCase__ : Dict ='''0''' * (2 - len(__lowerCamelCase )) + r
    lowerCamelCase__ : Tuple =apply_table(l + r , __lowerCamelCase )
    lowerCamelCase__ : str =xor(__lowerCamelCase , __lowerCamelCase )
    return temp + right
if __name__ == "__main__":
    # S-DES demo driver: derive the two round keys from a 10-bit key, then
    # encrypt and decrypt an 8-bit message.
    # NOTE(review): this block is heavily mangled — every variable was renamed
    # to `_lowercase` (each assignment shadowing the last), and it calls
    # apply_table / left_shift / function, which are all defined above under
    # the mangled name `snake_case__`. Restore the original names (key,
    # message, p8/p4 tables, IP, IP_inv, expansion, s0, s1, key1, key2, temp,
    # left, right, CT, PT) before running.
    _lowercase : Optional[Any] = input("Enter 10 bit key: ")
    _lowercase : List[str] = input("Enter 8 bit message: ")
    _lowercase : List[Any] = [6, 3, 7, 4, 8, 5, 1_0, 9]
    _lowercase : Any = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
    _lowercase : List[Any] = [2, 4, 3, 1]
    _lowercase : Optional[int] = [2, 6, 3, 1, 4, 8, 5, 7]
    _lowercase : Optional[int] = [4, 1, 3, 5, 7, 2, 8, 6]
    _lowercase : Any = [4, 1, 2, 3, 2, 3, 4, 1]
    _lowercase : int = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    _lowercase : Any = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
    # key generation
    _lowercase : Dict = apply_table(key, paa_table)
    _lowercase : Any = temp[:5]
    _lowercase : str = temp[5:]
    _lowercase : List[str] = left_shift(left)
    _lowercase : Union[str, Any] = left_shift(right)
    _lowercase : Tuple = apply_table(left + right, pa_table)
    _lowercase : Dict = left_shift(left)
    _lowercase : Tuple = left_shift(right)
    _lowercase : List[str] = left_shift(left)
    _lowercase : Dict = left_shift(right)
    _lowercase : Optional[int] = apply_table(left + right, pa_table)
    # encryption
    _lowercase : Tuple = apply_table(message, IP)
    _lowercase : Optional[Any] = function(expansion, sa, sa, keya, temp)
    _lowercase : Union[str, Any] = temp[4:] + temp[:4]
    _lowercase : Dict = function(expansion, sa, sa, keya, temp)
    _lowercase : List[Any] = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)
    # decryption
    _lowercase : List[Any] = apply_table(CT, IP)
    _lowercase : List[Any] = function(expansion, sa, sa, keya, temp)
    _lowercase : Union[str, Any] = temp[4:] + temp[:4]
    _lowercase : Any = function(expansion, sa, sa, keya, temp)
    _lowercase : int = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
| 625 |
"""simple docstring"""
def snake_case__ ( sentence : str ):
    """Reverse every word longer than four characters, leaving shorter words
    (and the original word order) untouched.

    Fix vs. the previous revision: the parameter was mangled while the body
    read ``word`` through the undefined placeholder name.

    >>> snake_case__("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        word[::-1] if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
    # Run the docstring examples, then print a demo conversion.
    import doctest

    doctest.testmod()
    # Fix vs. the previous revision: the call referenced the undefined name
    # `reverse_long_words`; the reverser in this module is named `snake_case__`.
    print(snake_case__("Hey wollef sroirraw"))
| 625 | 1 |
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_lowercase : Any = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
_lowercase : Dict = get_tests_dir("fixtures/vocab.json")
_lowercase : List[str] = get_tests_dir("fixtures")
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Tests for AutoProcessor resolution from hub ids and local folders."""

    # Minimal vocabulary used to build throwaway tokenizers in these tests.
    _a = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']

    def snake_case ( self : int )-> str:
        # NOTE(review): the assignment target was mangled to a throwaway local;
        # upstream setUp resets a module-level registration counter to 0 here.
        lowerCamelCase__ : List[str] =0
def snake_case ( self : Optional[int] )-> Optional[Any]:
lowerCamelCase__ : List[str] =AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(lowerCamelCase, lowerCamelCase )
def snake_case ( self : List[Any] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : Tuple =WavaVecaConfig()
lowerCamelCase__ : str =AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(lowerCamelCase )
processor.save_pretrained(lowerCamelCase )
lowerCamelCase__ : Tuple =AutoProcessor.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase, lowerCamelCase )
def snake_case ( self : List[Any] )-> List[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(lowerCamelCase, os.path.join(lowerCamelCase, lowerCamelCase ) )
copyfile(lowerCamelCase, os.path.join(lowerCamelCase, '''vocab.json''' ) )
lowerCamelCase__ : int =AutoProcessor.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase, lowerCamelCase )
def snake_case ( self : Optional[Any] )-> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : int =WavaVecaFeatureExtractor()
lowerCamelCase__ : Optional[Any] =AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
lowerCamelCase__ : Tuple =WavaVecaProcessor(lowerCamelCase, lowerCamelCase )
# save in new folder
processor.save_pretrained(lowerCamelCase )
# drop `processor_class` in tokenizer
with open(os.path.join(lowerCamelCase, lowerCamelCase ), '''r''' ) as f:
lowerCamelCase__ : Optional[Any] =json.load(lowerCamelCase )
config_dict.pop('''processor_class''' )
with open(os.path.join(lowerCamelCase, lowerCamelCase ), '''w''' ) as f:
f.write(json.dumps(lowerCamelCase ) )
lowerCamelCase__ : Union[str, Any] =AutoProcessor.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase, lowerCamelCase )
def snake_case ( self : Dict )-> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : int =WavaVecaFeatureExtractor()
lowerCamelCase__ : int =AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
lowerCamelCase__ : Union[str, Any] =WavaVecaProcessor(lowerCamelCase, lowerCamelCase )
# save in new folder
processor.save_pretrained(lowerCamelCase )
# drop `processor_class` in feature extractor
with open(os.path.join(lowerCamelCase, lowerCamelCase ), '''r''' ) as f:
lowerCamelCase__ : Union[str, Any] =json.load(lowerCamelCase )
config_dict.pop('''processor_class''' )
with open(os.path.join(lowerCamelCase, lowerCamelCase ), '''w''' ) as f:
f.write(json.dumps(lowerCamelCase ) )
lowerCamelCase__ : Any =AutoProcessor.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase, lowerCamelCase )
def snake_case ( self : Dict )-> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : List[str] =WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(lowerCamelCase )
# copy relevant files
copyfile(lowerCamelCase, os.path.join(lowerCamelCase, '''vocab.json''' ) )
# create emtpy sample processor
with open(os.path.join(lowerCamelCase, lowerCamelCase ), '''w''' ) as f:
f.write('''{}''' )
lowerCamelCase__ : Any =AutoProcessor.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase, lowerCamelCase )
def snake_case ( self : Union[str, Any] )-> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCamelCase ):
lowerCamelCase__ : Optional[Any] =AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase ):
lowerCamelCase__ : Any =AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''', trust_remote_code=lowerCamelCase )
lowerCamelCase__ : Any =AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''', trust_remote_code=lowerCamelCase )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__, '''NewProcessor''' )
lowerCamelCase__ : List[Any] =processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__, '''NewFeatureExtractor''' )
lowerCamelCase__ : Dict =processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__, '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCamelCase__ : Optional[Any] =AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''', trust_remote_code=lowerCamelCase, use_fast=lowerCamelCase )
lowerCamelCase__ : Tuple =new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__, '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__, '''NewTokenizer''' )
def snake_case ( self : Any )-> List[str]:
try:
AutoConfig.register('''custom''', lowerCamelCase )
AutoFeatureExtractor.register(lowerCamelCase, lowerCamelCase )
AutoTokenizer.register(lowerCamelCase, slow_tokenizer_class=lowerCamelCase )
AutoProcessor.register(lowerCamelCase, lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase ):
AutoProcessor.register(lowerCamelCase, lowerCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase__ : List[str] =CustomFeatureExtractor.from_pretrained(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ : Any =os.path.join(lowerCamelCase, '''vocab.txt''' )
with open(lowerCamelCase, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
lowerCamelCase__ : List[str] =CustomTokenizer(lowerCamelCase )
lowerCamelCase__ : Optional[Any] =CustomProcessor(lowerCamelCase, lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(lowerCamelCase )
lowerCamelCase__ : List[str] =AutoProcessor.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase, lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def snake_case ( self : Tuple )-> Optional[Any]:
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = False
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = False
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = 'AutoFeatureExtractor'
_a = 'AutoTokenizer'
_a = False
try:
AutoConfig.register('''custom''', lowerCamelCase )
AutoFeatureExtractor.register(lowerCamelCase, lowerCamelCase )
AutoTokenizer.register(lowerCamelCase, slow_tokenizer_class=lowerCamelCase )
AutoProcessor.register(lowerCamelCase, lowerCamelCase )
# If remote code is not set, the default is to use local classes.
lowerCamelCase__ : str =AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__, '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
lowerCamelCase__ : Tuple =AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''', trust_remote_code=lowerCamelCase )
self.assertEqual(processor.__class__.__name__, '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
lowerCamelCase__ : str =AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''', trust_remote_code=lowerCamelCase )
self.assertEqual(processor.__class__.__name__, '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def snake_case ( self : Union[str, Any] )-> int:
lowerCamelCase__ : int =AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__, '''BertTokenizerFast''' )
def snake_case ( self : Union[str, Any] )-> int:
lowerCamelCase__ : Any =AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__, '''ConvNextImageProcessor''' )
@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Staging-endpoint tests for pushing processors to the hub: user namespace,
    organization namespace, and dynamic (custom-code) processors.

    NOTE(review): identifiers look machine-obfuscated (methods named
    `snake_case`, locals bound as `lowerCamelCase__` but read as other names);
    confirm against the upstream transformers test suite.
    '''

    # Minimal vocabulary for the throwaway tokenizers created below.
    _a = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']

    # Authenticate once for the whole class with the staging test token.
    @classmethod
    def snake_case ( cls : Dict )-> str:
        lowerCamelCase__ : int =TOKEN
        HfFolder.save_token(lowerCamelCase )

    # Best-effort cleanup of every repo these tests may have created.
    @classmethod
    def snake_case ( cls : Tuple )-> Dict:
        try:
            delete_repo(token=cls._token, repo_id='''test-processor''' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='''valid_org/test-processor-org''' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='''test-dynamic-processor''' )
        except HTTPError:
            pass

    # Push into the user namespace via save_pretrained(push_to_hub=...) and
    # verify the reloaded processor matches field-by-field.
    def snake_case ( self : Any )-> int:
        lowerCamelCase__ : Optional[int] =WavaVecaProcessor.from_pretrained(lowerCamelCase )
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(lowerCamelCase, '''test-processor''' ), push_to_hub=lowerCamelCase, use_auth_token=self._token )
            lowerCamelCase__ : Union[str, Any] =WavaVecaProcessor.from_pretrained(F'''{USER}/test-processor''' )
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(lowerCamelCase, getattr(new_processor.feature_extractor, lowerCamelCase ) )
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab() )

    # Same as above but pushing into an organization namespace.
    def snake_case ( self : Dict )-> List[str]:
        lowerCamelCase__ : List[str] =WavaVecaProcessor.from_pretrained(lowerCamelCase )
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(lowerCamelCase, '''test-processor-org''' ), push_to_hub=lowerCamelCase, use_auth_token=self._token, organization='''valid_org''', )
            lowerCamelCase__ : List[Any] =WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(lowerCamelCase, getattr(new_processor.feature_extractor, lowerCamelCase ) )
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab() )

    # Custom classes: register_for_auto_class, push the code + configs, then
    # reload through AutoProcessor with trust_remote_code.
    def snake_case ( self : Any )-> str:
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()
        lowerCamelCase__ : Tuple =CustomFeatureExtractor.from_pretrained(lowerCamelCase )
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCamelCase__ : Tuple =os.path.join(lowerCamelCase, '''vocab.txt''' )
            with open(lowerCamelCase, '''w''', encoding='''utf-8''' ) as vocab_writer:
                vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
            lowerCamelCase__ : Optional[Any] =CustomTokenizer(lowerCamelCase )
            lowerCamelCase__ : Optional[Any] =CustomProcessor(lowerCamelCase, lowerCamelCase )
        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(F'''{USER}/test-dynamic-processor''', token=self._token )
            lowerCamelCase__ : Any =Repository(lowerCamelCase, clone_from=F'''{USER}/test-dynamic-processor''', token=self._token )
            processor.save_pretrained(lowerCamelCase )
            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map, {
                    '''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
                    '''AutoProcessor''': '''custom_processing.CustomProcessor''',
                }, )
            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(lowerCamelCase, '''tokenizer_config.json''' ) ) as f:
                lowerCamelCase__ : Dict =json.load(lowerCamelCase )
            self.assertDictEqual(
                tokenizer_config['''auto_map'''], {
                    '''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
                    '''AutoProcessor''': '''custom_processing.CustomProcessor''',
                }, )
            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(lowerCamelCase, '''custom_feature_extraction.py''' ) ) )
            self.assertTrue(os.path.isfile(os.path.join(lowerCamelCase, '''custom_tokenization.py''' ) ) )
            self.assertTrue(os.path.isfile(os.path.join(lowerCamelCase, '''custom_processing.py''' ) ) )
            repo.push_to_hub()
        lowerCamelCase__ : Union[str, Any] =AutoProcessor.from_pretrained(F'''{USER}/test-dynamic-processor''', trust_remote_code=lowerCamelCase )
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__, '''CustomProcessor''' )
| 625 |
"""simple docstring"""
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Project Euler 63: count n-digit positive integers that are also an nth power.

    Args:
        max_base: exclusive upper bound for the base (bases run 1..max_base-1).
        max_power: exclusive upper bound for the exponent (powers run 1..max_power-1).

    Returns:
        The number of (base, power) pairs where ``base ** power`` has exactly
        ``power`` decimal digits.

    The original (obfuscated) definition was a SyntaxError -- both parameters
    were named ``__lowerCamelCase`` -- and the body read the undefined names
    ``bases``/``powers``; the function is renamed to match its __main__ caller.
    """
    # A base >= 10 can never qualify: 10**n already has n + 1 digits, which is
    # why the default bound of 10 suffices for the full Project Euler answer.
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f'{solution(1_0, 2_2) = }')
| 625 | 1 |
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return every k-element combination of the integers 1..n, in ascending order.

    Args:
        n: size of the pool {1, ..., n}.
        k: number of elements per combination.

    Returns:
        A list of combinations, each itself an ascending list of ints.

    The original (obfuscated) definition was a SyntaxError -- both parameters
    were named ``__lowerCamelCase`` -- and it called the undefined name
    ``create_all_state``; both functions are restored to the names their call
    sites use.

    >>> generate_all_combinations(4, 2)
    [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]
    """
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    """Backtracking helper: extend ``current_list`` with ``level`` more values
    taken from ``increment..total_number``; completed combinations are appended
    (as copies) to ``total_list``.
    """
    if level == 0:
        total_list.append(current_list[:])
        return
    # Upper bound leaves enough room for the remaining `level - 1` picks.
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()
def print_all_state(total_list: list[list[int]]) -> None:
    """Print each combination on its own line, elements separated by spaces.

    The original (obfuscated) definition iterated the undefined name
    ``total_list`` while printing the whole parameter each time; it is renamed
    to match its __main__ caller and fixed to print each state once.
    """
    for state in total_list:
        print(*state)
if __name__ == "__main__":
    # Demo: print all 2-element combinations of {1, 2, 3, 4}.
    # The obfuscated original assigned `_lowercase` three times but read the
    # names `n`, `k` and `total_list`; the intended bindings are restored.
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
| 625 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    """Return ``x`` unchanged if it is iterable, otherwise duplicate it into a pair.

    Used below to normalize ``config.image_size`` / ``config.patch_size``
    values that may be either a single int or an (h, w) sequence.

    The original (obfuscated) definition returned the undefined name ``x``
    while its parameter was ``__lowerCamelCase``; it is renamed to match its
    call sites (``to_atuple(...)``).
    """
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class __SCREAMING_SNAKE_CASE :
    '''Shared test mixin for Flax VisionTextDualEncoder models: config-based and
    tower-based construction, save/load round-trips, attention-output shapes,
    and PyTorch <-> Flax numerical equivalence.

    NOTE(review): identifiers look machine-obfuscated (every method is named
    `snake_case`; locals are bound as `lowerCamelCase__` but read as other
    names), so bodies cannot run as written -- confirm intent against the
    upstream transformers test suite.
    '''

    # Hook for subclasses: build and return (vision_model, text_model).
    def snake_case ( self : Dict, lowerCamelCase : List[str], lowerCamelCase : Any )-> Union[str, Any]:
        pass

    # Hook for subclasses: build the shared config/inputs dict.
    def snake_case ( self : List[str] )-> List[str]:
        pass

    # Hook for subclasses: pretrained model + inputs for the slow round-trip test.
    def snake_case ( self : Optional[Any] )-> str:
        pass

    # Assert two arrays agree elementwise within `tol` (max-abs difference).
    def snake_case ( self : Union[str, Any], lowerCamelCase : np.ndarray, lowerCamelCase : np.ndarray, lowerCamelCase : float )-> Dict:
        lowerCamelCase__ : Union[str, Any] =np.abs((a - b) ).max()
        self.assertLessEqual(lowerCamelCase, lowerCamelCase, F'''Difference between torch and flax is {diff} (>= {tol}).''' )

    # Model built from a combined config: embeds must project to `projection_dim`.
    def snake_case ( self : Dict, lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : List[str], lowerCamelCase : Dict, lowerCamelCase : Any=None, **lowerCamelCase : str )-> int:
        lowerCamelCase__ : List[str] =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel(lowerCamelCase )
        lowerCamelCase__ : Dict =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
        self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], config.projection_dim) )

    # Model assembled from separately-built towers via from_vision_text_pretrained.
    def snake_case ( self : Any, lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : str=None, **lowerCamelCase : List[Any] )-> int:
        lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] ={'''vision_model''': vision_model, '''text_model''': text_model}
        lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
        lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
        self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], model.config.projection_dim) )

    # Save then reload the dual encoder; first outputs must match to 1e-3.
    def snake_case ( self : Any, lowerCamelCase : Dict, lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict=None, **lowerCamelCase : int )-> List[str]:
        lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Optional[int] ={'''vision_model''': vision_model, '''text_model''': text_model}
        lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
        lowerCamelCase__ : List[Any] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
        lowerCamelCase__ : int =output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(lowerCamelCase )
            lowerCamelCase__ : Dict =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
            lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
            lowerCamelCase__ : List[str] =after_output[0]
            lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(lowerCamelCase, 1E-3 )

    # Attention tensors must have the expected per-layer shapes for both towers.
    def snake_case ( self : Optional[Any], lowerCamelCase : Dict, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : List[Any]=None, **lowerCamelCase : List[Any] )-> Tuple:
        lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Any ={'''vision_model''': vision_model, '''text_model''': text_model}
        lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
        lowerCamelCase__ : List[str] =model(
            input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase, output_attentions=lowerCamelCase )
        lowerCamelCase__ : int =output.vision_model_output.attentions
        self.assertEqual(len(lowerCamelCase ), vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        lowerCamelCase__ : Tuple =to_atuple(vision_model.config.image_size )
        lowerCamelCase__ : Optional[Any] =to_atuple(vision_model.config.patch_size )
        lowerCamelCase__ : Union[str, Any] =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        lowerCamelCase__ : int =num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) )
        lowerCamelCase__ : List[Any] =output.text_model_output.attentions
        self.assertEqual(len(lowerCamelCase ), text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )

    # Core PT <-> Flax equivalence: compare direct outputs, then round-trip
    # through each framework's save/load path and compare again.
    def snake_case ( self : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : Any, lowerCamelCase : Union[str, Any] )-> Any:
        pt_model.to(lowerCamelCase )
        pt_model.eval()
        # prepare inputs
        lowerCamelCase__ : Any =inputs_dict
        lowerCamelCase__ : Any ={k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
        with torch.no_grad():
            lowerCamelCase__ : List[str] =pt_model(**lowerCamelCase ).to_tuple()
        lowerCamelCase__ : Optional[Any] =fx_model(**lowerCamelCase ).to_tuple()
        self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ):
            self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(lowerCamelCase )
            lowerCamelCase__ : Optional[int] =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase )
        lowerCamelCase__ : List[Any] =fx_model_loaded(**lowerCamelCase ).to_tuple()
        self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ):
            self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(lowerCamelCase )
            lowerCamelCase__ : str =VisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase )
        pt_model_loaded.to(lowerCamelCase )
        pt_model_loaded.eval()
        with torch.no_grad():
            lowerCamelCase__ : List[Any] =pt_model_loaded(**lowerCamelCase ).to_tuple()
        self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ):
            self.assert_almost_equals(lowerCamelCase, pt_output_loaded.numpy(), 4E-2 )

    # PT -> Flax: convert the PyTorch state dict into Flax params, then check.
    def snake_case ( self : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[Any], lowerCamelCase : str )-> List[Any]:
        lowerCamelCase__ : Any =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : List[Any] =VisionTextDualEncoderModel(lowerCamelCase )
        lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase )
        lowerCamelCase__ : str =convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase )
        lowerCamelCase__ : Tuple =fx_state
        self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase )

    # Flax -> PT: load Flax weights into the PyTorch model, then check.
    def snake_case ( self : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any] )-> Optional[int]:
        lowerCamelCase__ : Dict =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Tuple =VisionTextDualEncoderModel(lowerCamelCase )
        lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase )
        lowerCamelCase__ : Tuple =load_flax_weights_in_pytorch_model(lowerCamelCase, fx_model.params )
        self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase )

    # Test entry points driving the check_* helpers with subclass inputs.
    def snake_case ( self : Optional[int] )-> Union[str, Any]:
        lowerCamelCase__ : Any =self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**lowerCamelCase )

    def snake_case ( self : Tuple )-> int:
        lowerCamelCase__ : int =self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase )

    def snake_case ( self : Tuple )-> Any:
        lowerCamelCase__ : Tuple =self.prepare_config_and_inputs()
        self.check_save_load(**lowerCamelCase )

    def snake_case ( self : str )-> Any:
        lowerCamelCase__ : str =self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**lowerCamelCase )

    # Cross-framework equivalence in both conversion directions.
    @is_pt_flax_cross_test
    def snake_case ( self : Tuple )-> List[Any]:
        lowerCamelCase__ : Union[str, Any] =self.prepare_config_and_inputs()
        lowerCamelCase__ : Union[str, Any] =config_inputs_dict.pop('''vision_config''' )
        lowerCamelCase__ : Optional[Any] =config_inputs_dict.pop('''text_config''' )
        lowerCamelCase__ : Tuple =config_inputs_dict
        self.check_equivalence_pt_to_flax(lowerCamelCase, lowerCamelCase, lowerCamelCase )
        self.check_equivalence_flax_to_pt(lowerCamelCase, lowerCamelCase, lowerCamelCase )

    # Slow: pretrained model survives a save/load round-trip to within 1e-5.
    @slow
    def snake_case ( self : Optional[Any] )-> Tuple:
        lowerCamelCase__ , lowerCamelCase__ : Dict =self.get_pretrained_model_and_inputs()
        lowerCamelCase__ : Optional[int] =model_a(**lowerCamelCase )
        lowerCamelCase__ : List[str] =outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(lowerCamelCase )
            lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] =model_a(**lowerCamelCase )
        lowerCamelCase__ : List[Any] =after_outputs[0]
        lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) )
        self.assertLessEqual(lowerCamelCase, 1E-5 )
@require_flax
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    '''ViT (vision) + BERT (text) instantiation of the dual-encoder test mixin.'''

    # Tiny pretrained ViT/BERT dual encoder plus random pixel/text inputs.
    def snake_case ( self : Optional[int] )-> Optional[Any]:
        lowerCamelCase__ : str =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-vit''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
        lowerCamelCase__ : Union[str, Any] =13
        lowerCamelCase__ : List[str] =floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        lowerCamelCase__ : List[str] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
        lowerCamelCase__ : Optional[int] =random_attention_mask([batch_size, 4] )
        lowerCamelCase__ : Any ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return model, inputs

    # Concrete (vision, text) model pair for the mixin hooks.
    def snake_case ( self : str, lowerCamelCase : str, lowerCamelCase : int )-> int:
        lowerCamelCase__ : str =FlaxViTModel(lowerCamelCase )
        lowerCamelCase__ : Any =FlaxBertModel(lowerCamelCase )
        return vision_model, text_model

    # Combine ViT and BERT tester configs/inputs into the mixin's input dict.
    def snake_case ( self : int )-> Optional[int]:
        lowerCamelCase__ : Any =FlaxViTModelTester(self )
        lowerCamelCase__ : Union[str, Any] =FlaxBertModelTester(self )
        lowerCamelCase__ : Any =vit_model_tester.prepare_config_and_inputs()
        lowerCamelCase__ : Optional[Any] =bert_model_tester.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ : Any =vision_config_and_inputs
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    '''CLIP vision tower + BERT text tower instantiation of the dual-encoder
    test mixin (vision weights converted from PyTorch, hence @require_torch).'''

    # Tiny pretrained CLIP/BERT dual encoder plus random pixel/text inputs.
    def snake_case ( self : Optional[int] )-> Optional[int]:
        lowerCamelCase__ : Union[str, Any] =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-clip''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
        lowerCamelCase__ : Union[str, Any] =13
        lowerCamelCase__ : Optional[Any] =floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        lowerCamelCase__ : Union[str, Any] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
        lowerCamelCase__ : str =random_attention_mask([batch_size, 4] )
        lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return model, inputs

    # Concrete (vision, text) model pair for the mixin hooks.
    def snake_case ( self : List[str], lowerCamelCase : Any, lowerCamelCase : Dict )-> Dict:
        lowerCamelCase__ : str =FlaxCLIPVisionModel(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =FlaxBertModel(lowerCamelCase )
        return vision_model, text_model

    # Combine CLIP and BERT tester configs/inputs into the mixin's input dict.
    def snake_case ( self : Optional[int] )-> Optional[Any]:
        lowerCamelCase__ : List[Any] =FlaxCLIPVisionModelTester(self )
        lowerCamelCase__ : List[Any] =FlaxBertModelTester(self )
        lowerCamelCase__ : Any =clip_model_tester.prepare_config_and_inputs()
        lowerCamelCase__ : Optional[int] =bert_model_tester.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ : List[Any] =vision_config_and_inputs
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Slow integration test against the public clip-italian checkpoint:
    checks logit shapes and pins reference logit values to 1e-3.'''

    @slow
    def snake_case ( self : Tuple )-> Optional[Any]:
        lowerCamelCase__ : Any =FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''', logit_scale_init_value=1.0 )
        lowerCamelCase__ : List[Any] =VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
        lowerCamelCase__ : Optional[int] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowerCamelCase__ : Dict =processor(
            text=['''una foto di un gatto''', '''una foto di un cane'''], images=lowerCamelCase, padding=lowerCamelCase, return_tensors='''np''' )
        lowerCamelCase__ : List[Any] =model(**lowerCamelCase )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
        # Reference values recorded from a known-good run of this checkpoint.
        lowerCamelCase__ : Any =np.array([[1.2_284_727, 0.3_104_122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image, lowerCamelCase, atol=1E-3 ) )
| 625 | 1 |
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
_lowercase : Dict = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_lowercase : Optional[Any] = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, 
average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
_lowercase : Any = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
    """``datasets`` metric wrapper around scikit-learn's F1 score."""

    def snake_case ( self : str )-> Dict:
        # Declare the metric's input schema: sequences of ints for the
        # ``multilabel`` configuration, scalar ints otherwise.
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
                    '''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
                }
                if self.config_name == '''multilabel'''
                else {
                    '''predictions''': datasets.Value('''int32''' ),
                    '''references''': datasets.Value('''int32''' ),
                } ), reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''], )
    def snake_case ( self : int, lowerCamelCase : Optional[int], lowerCamelCase : int, lowerCamelCase : Any=None, lowerCamelCase : Dict=1, lowerCamelCase : Optional[Any]="binary", lowerCamelCase : List[Any]=None )-> Tuple:
        # Delegate to sklearn; a scalar is returned for averaged scores, an
        # array of per-class scores when ``average=None``.
        # NOTE(review): ``fa_score`` comes from a mangled
        # ``from sklearn.metrics import f1_score`` — confirm the import name;
        # the ``score`` read below also no longer matches its assignment.
        lowerCamelCase__ : Dict =fa_score(
            lowerCamelCase, lowerCamelCase, labels=lowerCamelCase, pos_label=lowerCamelCase, average=lowerCamelCase, sample_weight=lowerCamelCase )
        return {"f1": float(lowerCamelCase ) if score.size == 1 else score}
| 625 |
"""simple docstring"""
def snake_case__ ( values : list , weights : list , number_of_items : int , max_weight : int , index : int ):
    """Return the best total value for the 0/1 knapsack problem.

    Plain branching recursion: at each ``index`` either skip the item or,
    if it still fits in ``max_weight``, take it.

    Note: the previous version declared every parameter as
    ``__lowerCamelCase`` — duplicate argument names are a SyntaxError — and
    carried two dead ``= 0`` assignments; both defects are fixed here.

    Args:
        values: value of each item.
        weights: weight of each item (parallel to ``values``).
        number_of_items: how many items to consider (normally ``len(values)``).
        max_weight: remaining weight capacity.
        index: index of the item currently being decided.

    Returns:
        The maximum total value achievable with items ``index .. number_of_items - 1``.
    """
    # Base case: no items left to decide.
    if index == number_of_items:
        return 0
    # Option 1: leave item ``index`` out of the knapsack.
    ans_without_item = snake_case__(values , weights , number_of_items , max_weight , index + 1 )
    # Option 2: take item ``index`` if it still fits.
    ans_with_item = 0
    if weights[index] <= max_weight:
        ans_with_item = values[index] + snake_case__(
            values , weights , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans_without_item , ans_with_item )
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 625 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class __SCREAMING_SNAKE_CASE :
    """Directed edge record used by the 0-1 BFS graph below."""

    # NOTE(review): the field declarations were mangled to ``_a = 42`` by an
    # obfuscation pass; with no annotated fields this dataclass generates a
    # zero-argument __init__, so ``Edge(destination, weight)`` as used below
    # would raise TypeError.  The original presumably declared
    # ``destination_vertex: int`` and ``weight: int`` — confirm and restore.
    _a = 42
    _a = 42
class __SCREAMING_SNAKE_CASE :
    """Adjacency-list digraph with a deque-based 0-1 BFS shortest path.

    NOTE(review): local/attribute names were mangled to ``lowerCamelCase__``;
    reads such as ``size``, ``vertex``, ``queue`` and ``start_vertex`` no
    longer match any assignment target, so the class cannot run as written —
    restore the original names before use.
    """

    def __init__( self : Tuple, lowerCamelCase : int )-> str:
        # One adjacency list per vertex, plus the vertex count.
        lowerCamelCase__ : list[list[Edge]] =[[] for _ in range(lowerCamelCase )]
        lowerCamelCase__ : Tuple =size
    def __getitem__( self : List[Any], lowerCamelCase : int )-> Iterator[Edge]:
        # Iterate the outgoing edges of the given vertex.
        return iter(self._graph[vertex] )
    @property
    def snake_case ( self : Optional[int] )-> List[str]:
        # Number of vertices in the graph.
        return self._size
    def snake_case ( self : Dict, lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : int )-> int:
        # Add a directed edge; 0-1 BFS only supports edge weights 0 or 1.
        if weight not in (0, 1):
            raise ValueError('''Edge weight must be either 0 or 1.''' )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('''Vertex indexes must be in [0; size).''' )
        self._graph[from_vertex].append(Edge(lowerCamelCase, lowerCamelCase ) )
    def snake_case ( self : Any, lowerCamelCase : int, lowerCamelCase : int )-> int | None:
        # 0-1 BFS: weight-0 edges go to the front of the deque, weight-1 edges
        # to the back, yielding shortest distances in O(V + E).
        lowerCamelCase__ : Tuple =deque([start_vertex] )
        lowerCamelCase__ : list[int | None] =[None] * self.size
        lowerCamelCase__ : List[str] =0
        while queue:
            lowerCamelCase__ : Union[str, Any] =queue.popleft()
            lowerCamelCase__ : int =distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                lowerCamelCase__ : str =current_distance + edge.weight
                lowerCamelCase__ : List[str] =distances[edge.destination_vertex]
                # Skip relaxation when a distance at least as good is known.
                if (
                    isinstance(lowerCamelCase, lowerCamelCase )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                lowerCamelCase__ : List[Any] =new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError('''No path from start_vertex to finish_vertex.''' )
        return distances[finish_vertex]
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 625 |
"""simple docstring"""
# Mapping from a bare dependency name to its full version specifier.
# NOTE(review): this looks like transformers' auto-generated dependency pin
# table (normally regenerated from setup.py) — confirm before hand-editing.
_lowercase : Optional[Any] = {
    "Pillow": "Pillow<10.0.0",
    "accelerate": "accelerate>=0.20.3",
    "av": "av==9.2.0",
    "beautifulsoup4": "beautifulsoup4",
    "black": "black~=23.1",
    "codecarbon": "codecarbon==1.2.0",
    "cookiecutter": "cookiecutter==1.7.3",
    "dataclasses": "dataclasses",
    "datasets": "datasets!=2.5.0",
    "decord": "decord==0.6.0",
    "deepspeed": "deepspeed>=0.9.3",
    "diffusers": "diffusers",
    "dill": "dill<0.3.5",
    "evaluate": "evaluate>=0.2.0",
    "fairscale": "fairscale>0.3",
    "faiss-cpu": "faiss-cpu",
    "fastapi": "fastapi",
    "filelock": "filelock",
    "flax": "flax>=0.4.1,<=0.7.0",
    "ftfy": "ftfy",
    "fugashi": "fugashi>=1.0",
    "GitPython": "GitPython<3.1.19",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
    "importlib_metadata": "importlib_metadata",
    "ipadic": "ipadic>=1.0.0,<2.0",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
    "jaxlib": "jaxlib>=0.1.65,<=0.4.13",
    "jieba": "jieba",
    "kenlm": "kenlm",
    "keras-nlp": "keras-nlp>=0.3.1",
    "librosa": "librosa",
    "nltk": "nltk",
    "natten": "natten>=0.14.6",
    "numpy": "numpy>=1.17",
    "onnxconverter-common": "onnxconverter-common",
    "onnxruntime-tools": "onnxruntime-tools>=1.4.2",
    "onnxruntime": "onnxruntime>=1.4.0",
    "opencv-python": "opencv-python",
    "optuna": "optuna",
    "optax": "optax>=0.0.8,<=0.1.4",
    "packaging": "packaging>=20.0",
    "parameterized": "parameterized",
    "phonemizer": "phonemizer",
    "protobuf": "protobuf",
    "psutil": "psutil",
    "pyyaml": "pyyaml>=5.1",
    "pydantic": "pydantic<2",
    "pytest": "pytest>=7.2.0",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "python": "python>=3.8.0",
    "ray[tune]": "ray[tune]",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "rhoknp": "rhoknp>=1.1.0,<1.3.1",
    "rjieba": "rjieba",
    "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
    "ruff": "ruff>=0.0.241,<=0.0.259",
    "sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
    "sacremoses": "sacremoses",
    "safetensors": "safetensors>=0.3.1",
    "sagemaker": "sagemaker>=2.31.0",
    "scikit-learn": "scikit-learn",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "sigopt": "sigopt",
    "starlette": "starlette",
    "sudachipy": "sudachipy>=0.6.6",
    "sudachidict_core": "sudachidict_core>=20220729",
    "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
    "tensorflow": "tensorflow>=2.6,<2.14",
    "tensorflow-text": "tensorflow-text<2.14",
    "tf2onnx": "tf2onnx",
    "timeout-decorator": "timeout-decorator",
    "timm": "timm",
    "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
    "torch": "torch>=1.9,!=1.12.0",
    "torchaudio": "torchaudio",
    "torchvision": "torchvision",
    "pyctcdecode": "pyctcdecode>=0.4.0",
    "tqdm": "tqdm>=4.27",
    "unidic": "unidic>=1.0.2",
    "unidic_lite": "unidic_lite>=1.0.7",
    "urllib3": "urllib3<2.0.0",
    "uvicorn": "uvicorn",
}
| 625 | 1 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """GPT-2 based caption decoder driven by a projected prefix embedding.

    NOTE(review): base-class and local names were mangled by an obfuscation
    pass (``lowerCAmelCase_`` / ``lowerCamelCase__``).  Judging by the imports
    above, the bases were presumably ModelMixin, ConfigMixin and
    ModuleUtilsMixin; many reads below (``prefix_length``, ``tokens``,
    ``scores`` …) no longer match any assignment target, so this class cannot
    run as written — confirm against the original file.
    """
    # Attention-bias buffer name patterns, presumably the keys to ignore when
    # loading pretrained GPT-2 weights — TODO confirm the attribute name.
    _a = [r'h\.\d+\.attn\.bias', r'h\.\d+\.attn\.masked_bias']
    @register_to_config
    def __init__( self : Optional[int], lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : Optional[int] = None, lowerCamelCase : int = 5_0257, lowerCamelCase : int = 1024, lowerCamelCase : int = 768, lowerCamelCase : int = 12, lowerCamelCase : int = 12, lowerCamelCase : Optional[int] = None, lowerCamelCase : str = "gelu_new", lowerCamelCase : float = 0.1, lowerCamelCase : float = 0.1, lowerCamelCase : float = 0.1, lowerCamelCase : float = 1E-5, lowerCamelCase : float = 0.02, lowerCamelCase : bool = True, lowerCamelCase : bool = True, lowerCamelCase : bool = False, lowerCamelCase : bool = False, )-> Optional[Any]:
        super().__init__()
        lowerCamelCase__ : List[Any] =prefix_length
        # A hidden projection is mandatory whenever the prefix width differs
        # from the GPT-2 embedding width ``n_embd``.
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''
                F''' `n_embd`: {n_embd} are not equal.''' )
        lowerCamelCase__ : List[Any] =prefix_inner_dim
        lowerCamelCase__ : Tuple =prefix_hidden_dim
        # Optional encode/decode projections around the prefix; identity when
        # no hidden dim is configured.
        lowerCamelCase__ : Any =(
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        lowerCamelCase__ : List[str] =(
            nn.Linear(self.prefix_hidden_dim, lowerCamelCase ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        lowerCamelCase__ : int =GPTaConfig(
            vocab_size=lowerCamelCase, n_positions=lowerCamelCase, n_embd=lowerCamelCase, n_layer=lowerCamelCase, n_head=lowerCamelCase, n_inner=lowerCamelCase, activation_function=lowerCamelCase, resid_pdrop=lowerCamelCase, embd_pdrop=lowerCamelCase, attn_pdrop=lowerCamelCase, layer_norm_epsilon=lowerCamelCase, initializer_range=lowerCamelCase, scale_attn_weights=lowerCamelCase, use_cache=lowerCamelCase, scale_attn_by_inverse_layer_idx=lowerCamelCase, reorder_and_upcast_attn=lowerCamelCase, )
        lowerCamelCase__ : Tuple =GPTaLMHeadModel(lowerCamelCase )
    def snake_case ( self : List[str], lowerCamelCase : torch.Tensor, lowerCamelCase : torch.Tensor, lowerCamelCase : Optional[torch.Tensor] = None, lowerCamelCase : Optional[torch.Tensor] = None, )-> int:
        # Forward pass: embed the token ids, project the prefix, concatenate
        # prefix + text embeddings and run GPT-2 on the result.
        lowerCamelCase__ : int =self.transformer.transformer.wte(lowerCamelCase )
        lowerCamelCase__ : Dict =self.encode_prefix(lowerCamelCase )
        lowerCamelCase__ : Tuple =self.decode_prefix(lowerCamelCase )
        lowerCamelCase__ : List[Any] =torch.cat((prefix_embeds, embedding_text), dim=1 )
        if labels is not None:
            # Prepend zero "dummy" labels so the label tensor lines up with
            # the prefix positions.
            lowerCamelCase__ : Dict =self.get_dummy_token(input_ids.shape[0], input_ids.device )
            lowerCamelCase__ : List[str] =torch.cat((dummy_token, input_ids), dim=1 )
        lowerCamelCase__ : Union[str, Any] =self.transformer(inputs_embeds=lowerCamelCase, labels=lowerCamelCase, attention_mask=lowerCamelCase )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def snake_case ( self : List[str], lowerCamelCase : int, lowerCamelCase : torch.device )-> torch.Tensor:
        # Zero label placeholders covering the prefix positions.
        return torch.zeros(lowerCamelCase, self.prefix_length, dtype=torch.intaa, device=lowerCamelCase )
    def snake_case ( self : str, lowerCamelCase : int )-> int:
        # Thin wrapper over the prefix encoder projection.
        return self.encode_prefix(lowerCamelCase )
    @torch.no_grad()
    def snake_case ( self : Dict, lowerCamelCase : List[Any], lowerCamelCase : Any, lowerCamelCase : Optional[Any] )-> Tuple:
        # Generate one caption per feature row via beam search, keeping only
        # the top beam of each.
        lowerCamelCase__ : Union[str, Any] =torch.split(lowerCamelCase, 1, dim=0 )
        lowerCamelCase__ : List[Any] =[]
        lowerCamelCase__ : Optional[int] =[]
        for feature in features:
            lowerCamelCase__ : List[str] =self.decode_prefix(feature.to(lowerCamelCase ) ) # back to the clip feature
            # Only support beam search for now
            lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.generate_beam(
                input_embeds=lowerCamelCase, device=lowerCamelCase, eos_token_id=lowerCamelCase )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        lowerCamelCase__ : Union[str, Any] =torch.stack(lowerCamelCase )
        lowerCamelCase__ : Any =torch.stack(lowerCamelCase )
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def snake_case ( self : Dict, lowerCamelCase : Dict=None, lowerCamelCase : int=None, lowerCamelCase : Any=None, lowerCamelCase : int = 5, lowerCamelCase : int = 67, lowerCamelCase : float = 1.0, lowerCamelCase : Optional[int] = None, )-> str:
        # Beam search over GPT-2 continuations of the prefix embedding,
        # returning token tensors and per-beam sequence lengths sorted by
        # average log-probability.
        lowerCamelCase__ : str =eos_token_id
        lowerCamelCase__ : Any =None
        lowerCamelCase__ : int =None
        lowerCamelCase__ : List[str] =torch.ones(lowerCamelCase, device=lowerCamelCase, dtype=torch.int )
        lowerCamelCase__ : Any =torch.zeros(lowerCamelCase, device=lowerCamelCase, dtype=torch.bool )
        if input_embeds is not None:
            lowerCamelCase__ : int =input_embeds
        else:
            lowerCamelCase__ : Optional[Any] =self.transformer.transformer.wte(lowerCamelCase )
        for i in range(lowerCamelCase ):
            lowerCamelCase__ : Any =self.transformer(inputs_embeds=lowerCamelCase )
            lowerCamelCase__ : List[Any] =outputs.logits
            lowerCamelCase__ : Tuple =logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            lowerCamelCase__ : int =logits.softmax(-1 ).log()
            if scores is None:
                # First step: seed the beams from the top-k next tokens.
                lowerCamelCase__ , lowerCamelCase__ : List[str] =logits.topk(lowerCamelCase, -1 )
                lowerCamelCase__ : Union[str, Any] =generated.expand(lowerCamelCase, *generated.shape[1:] )
                lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =next_tokens.permute(1, 0 ), scores.squeeze(0 )
                if tokens is None:
                    lowerCamelCase__ : Tuple =next_tokens
                else:
                    lowerCamelCase__ : Optional[Any] =tokens.expand(lowerCamelCase, *tokens.shape[1:] )
                    lowerCamelCase__ : Tuple =torch.cat((tokens, next_tokens), dim=1 )
            else:
                # Later steps: extend each live beam and re-select the top-k
                # by length-normalised cumulative score; stopped beams are
                # frozen by masking their logits.
                lowerCamelCase__ : Union[str, Any] =-float(np.inf )
                lowerCamelCase__ : Dict =0
                lowerCamelCase__ : Optional[Any] =scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                lowerCamelCase__ : Tuple =scores_sum / seq_lengths[:, None]
                lowerCamelCase__ , lowerCamelCase__ : Dict =scores_sum_average.view(-1 ).topk(lowerCamelCase, -1 )
                lowerCamelCase__ : Optional[int] =next_tokens // scores_sum.shape[1]
                lowerCamelCase__ : Tuple =seq_lengths[next_tokens_source]
                lowerCamelCase__ : int =next_tokens % scores_sum.shape[1]
                lowerCamelCase__ : Optional[int] =next_tokens.unsqueeze(1 )
                lowerCamelCase__ : List[str] =tokens[next_tokens_source]
                lowerCamelCase__ : Optional[Any] =torch.cat((tokens, next_tokens), dim=1 )
                lowerCamelCase__ : Optional[Any] =generated[next_tokens_source]
                lowerCamelCase__ : Any =scores_sum_average * seq_lengths
                lowerCamelCase__ : List[str] =is_stopped[next_tokens_source]
            lowerCamelCase__ : List[str] =self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0], 1, -1 )
            lowerCamelCase__ : List[str] =torch.cat((generated, next_token_embed), dim=1 )
            lowerCamelCase__ : Dict =is_stopped + next_tokens.eq(lowerCamelCase ).squeeze()
            if is_stopped.all():
                break
        lowerCamelCase__ : Union[str, Any] =scores / seq_lengths
        lowerCamelCase__ : Dict =scores.argsort(descending=lowerCamelCase )
        # tokens tensors are already padded to max_seq_length
        lowerCamelCase__ : int =[tokens[i] for i in order]
        lowerCamelCase__ : Any =torch.stack(lowerCamelCase, dim=0 )
        lowerCamelCase__ : Optional[Any] =torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
| 625 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : list[int] ):
    """Return the maximum product over all contiguous subarrays of the input.

    Kadane-style single pass that tracks both the largest and the smallest
    product ending at each position: a negative factor can turn the running
    minimum into the new maximum, so the two are swapped before updating.

    Note: the previous version's local names were mangled, so it read names
    (``min_till_now``, ``numbers``) that were never assigned and raised
    NameError at runtime; this version restores working locals while keeping
    the original interface and error message.

    Args:
        __lowerCamelCase: the input integers; an empty input yields 0.

    Returns:
        The maximum product of any non-empty contiguous subarray, or 0 for
        an empty input.

    Raises:
        ValueError: if the input is not a list/tuple of integers.
    """
    if not __lowerCamelCase:
        return 0
    if not isinstance(__lowerCamelCase , (list, tuple) ) or not all(
        isinstance(number , int ) for number in __lowerCamelCase ):
        raise ValueError('''numbers must be an iterable of integers''' )
    max_till_now = min_till_now = max_prod = __lowerCamelCase[0]
    for i in range(1 , len(__lowerCamelCase ) ):
        # update the maximum and minimum subarray products
        number = __lowerCamelCase[i]
        if number < 0:
            # A negative factor swaps the roles of running max and min.
            max_till_now , min_till_now = min_till_now , max_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
    return max_prod
| 625 | 1 |
"""simple docstring"""
import functools
from typing import Any
def snake_case__ ( string : str , words : list[str] ):
    """Return True if ``string`` can be segmented into a sequence of ``words``.

    Builds a trie over the word list, then runs a memoised depth-first search
    over start indices (classic word-break dynamic programming).

    Note: the previous version declared both parameters as
    ``__lowerCamelCase`` — duplicate argument names are a SyntaxError — and
    its mangled locals shadowed the trie sentinel; both defects are fixed
    here while keeping the original validation messages.

    Args:
        string: the text to segment (must be a non-empty str).
        words: the dictionary of allowed words (non-empty strings).

    Returns:
        True if the whole string can be covered by a concatenation of words.

    Raises:
        ValueError: on an empty/non-str ``string`` or a ``words`` argument
            that is not a list of non-empty strings.
    """
    # Validation
    if not isinstance(string , str ) or len(string ) == 0:
        raise ValueError('''the string should be not empty string''' )
    if not isinstance(words , list ) or not all(
        isinstance(item , str ) and len(item ) > 0 for item in words ):
        raise ValueError('''the words should be a list of non-empty strings''' )
    # Build trie
    trie : dict[str, Any] = {}
    word_keeper_key = '''WORD_KEEPER'''
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        # Mark the node that terminates a complete word.
        trie_node[word_keeper_key] = True
    len_string = len(string )
    # Dynamic programming method
    @functools.cache
    def is_breakable(index : int ) -> bool:
        # True if string[index:] can be segmented into dictionary words.
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index , len_string ):
            trie_node = trie_node.get(string[i] , None )
            if trie_node is None:
                return False
            # A full word ends at position i; try to break the remainder.
            if trie_node.get(word_keeper_key , False ) and is_breakable(i + 1 ):
                return True
        return False
    return is_breakable(0 )
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 625 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    """Output container returned by the Flax ControlNet model below.

    NOTE(review): the base class (presumably ``BaseOutput``) and the two
    field declarations were mangled to ``_a = 42``; judging by the model's
    return statement they were ``down_block_res_samples`` and
    ``mid_block_res_sample`` — confirm against the original file.
    """
    _a = 42
    _a = 42
class __SCREAMING_SNAKE_CASE ( nn.Module ):
    """Embeds the ControlNet conditioning image into the UNet feature space.

    NOTE(review): class attribute names were mangled to ``_a``; the method
    bodies read ``block_out_channels``, ``conditioning_embedding_channels``
    and ``dtype``, so those were presumably the declared fields — confirm.
    Local names inside the methods are likewise mangled and no longer match
    the names read (``blocks``, ``embedding`` …).
    """
    _a = 42
    _a = (1_6, 3_2, 9_6, 2_5_6)
    _a = jnp.floataa
    def snake_case ( self : Tuple )-> int:
        # Flax setup: one input conv, pairs of (same-size, stride-2) convs
        # down the channel pyramid, and a zero-initialised output conv so the
        # ControlNet starts as a no-op perturbation.
        lowerCamelCase__ : Tuple =nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        lowerCamelCase__ : Dict =[]
        for i in range(len(self.block_out_channels ) - 1 ):
            lowerCamelCase__ : Dict =self.block_out_channels[i]
            lowerCamelCase__ : Dict =self.block_out_channels[i + 1]
            lowerCamelCase__ : List[str] =nn.Conv(
                lowerCamelCase, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(lowerCamelCase )
            lowerCamelCase__ : Optional[int] =nn.Conv(
                lowerCamelCase, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(lowerCamelCase )
        lowerCamelCase__ : Any =blocks
        lowerCamelCase__ : Optional[int] =nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
    def __call__( self : Any, lowerCamelCase : int )-> List[str]:
        # conv_in -> silu -> (conv -> silu)* -> conv_out
        lowerCamelCase__ : Tuple =self.conv_in(lowerCamelCase )
        lowerCamelCase__ : Dict =nn.silu(lowerCamelCase )
        for block in self.blocks:
            lowerCamelCase__ : str =block(lowerCamelCase )
            lowerCamelCase__ : List[str] =nn.silu(lowerCamelCase )
        lowerCamelCase__ : Any =self.conv_out(lowerCamelCase )
        return embedding
@flax_register_to_config
class __SCREAMING_SNAKE_CASE ( nn.Module , lowerCAmelCase_ , lowerCAmelCase_ ):
    """Flax ControlNet: a UNet-down-path copy that produces residual samples
    to condition a diffusion UNet on an extra image signal.

    NOTE(review): mixin base names, class-attribute names and most local
    names were mangled by an obfuscation pass (``lowerCAmelCase_`` /
    ``_a`` / ``lowerCamelCase__``); the bases were presumably
    FlaxModelMixin and ConfigMixin, and many reads below no longer match any
    assignment target, so the class cannot run as written — confirm against
    the original file.  ``jnp.floataa`` / ``jnp.intaa`` likewise look like
    mangled ``float32`` / ``int32`` dtypes.
    """
    _a = 3_2
    _a = 4
    _a = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    _a = False
    _a = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
    _a = 2
    _a = 8
    _a = None
    _a = 1_2_8_0
    _a = 0.0
    _a = False
    _a = jnp.floataa
    _a = True
    _a = 0
    _a = "rgb"
    _a = (1_6, 3_2, 9_6, 2_5_6)
    def snake_case ( self : str, lowerCamelCase : jax.random.KeyArray )-> FrozenDict:
        # init input tensors
        lowerCamelCase__ : int =(1, self.in_channels, self.sample_size, self.sample_size)
        lowerCamelCase__ : int =jnp.zeros(lowerCamelCase, dtype=jnp.floataa )
        lowerCamelCase__ : Union[str, Any] =jnp.ones((1,), dtype=jnp.intaa )
        lowerCamelCase__ : str =jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa )
        # Conditioning image is 8x the latent sample size (VAE scale factor).
        lowerCamelCase__ : Any =(1, 3, self.sample_size * 8, self.sample_size * 8)
        lowerCamelCase__ : Optional[Any] =jnp.zeros(lowerCamelCase, dtype=jnp.floataa )
        lowerCamelCase__ , lowerCamelCase__ : List[Any] =jax.random.split(lowerCamelCase )
        lowerCamelCase__ : Dict ={'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )["params"]
    def snake_case ( self : Any )-> Tuple:
        lowerCamelCase__ : Optional[int] =self.block_out_channels
        lowerCamelCase__ : Tuple =block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        lowerCamelCase__ : List[Any] =self.num_attention_heads or self.attention_head_dim
        # input
        lowerCamelCase__ : int =nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        # time
        lowerCamelCase__ : str =FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
        lowerCamelCase__ : Dict =FlaxTimestepEmbedding(lowerCamelCase, dtype=self.dtype )
        lowerCamelCase__ : List[Any] =FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )
        # Broadcast scalar settings to one entry per down block.
        lowerCamelCase__ : Dict =self.only_cross_attention
        if isinstance(lowerCamelCase, lowerCamelCase ):
            lowerCamelCase__ : int =(only_cross_attention,) * len(self.down_block_types )
        if isinstance(lowerCamelCase, lowerCamelCase ):
            lowerCamelCase__ : List[str] =(num_attention_heads,) * len(self.down_block_types )
        # down
        lowerCamelCase__ : Union[str, Any] =[]
        lowerCamelCase__ : Dict =[]
        lowerCamelCase__ : List[Any] =block_out_channels[0]
        # Zero-initialised 1x1 convs produce the per-resolution residuals.
        lowerCamelCase__ : List[Any] =nn.Conv(
            lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
        controlnet_down_blocks.append(lowerCamelCase )
        for i, down_block_type in enumerate(self.down_block_types ):
            lowerCamelCase__ : List[Any] =output_channel
            lowerCamelCase__ : str =block_out_channels[i]
            lowerCamelCase__ : Dict =i == len(lowerCamelCase ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                lowerCamelCase__ : str =FlaxCrossAttnDownBlockaD(
                    in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
            else:
                lowerCamelCase__ : List[Any] =FlaxDownBlockaD(
                    in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
            down_blocks.append(lowerCamelCase )
            for _ in range(self.layers_per_block ):
                lowerCamelCase__ : Any =nn.Conv(
                    lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(lowerCamelCase )
            if not is_final_block:
                lowerCamelCase__ : Any =nn.Conv(
                    lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(lowerCamelCase )
        lowerCamelCase__ : int =down_blocks
        lowerCamelCase__ : List[str] =controlnet_down_blocks
        # mid
        lowerCamelCase__ : Tuple =block_out_channels[-1]
        lowerCamelCase__ : List[Any] =FlaxUNetMidBlockaDCrossAttn(
            in_channels=lowerCamelCase, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )
        lowerCamelCase__ : List[str] =nn.Conv(
            lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
    def __call__( self : int, lowerCamelCase : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : str, lowerCamelCase : float = 1.0, lowerCamelCase : bool = True, lowerCamelCase : bool = False, )-> Union[FlaxControlNetOutput, Tuple]:
        # Optionally flip the conditioning image channels to match "rgb".
        lowerCamelCase__ : int =self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            lowerCamelCase__ : int =jnp.flip(lowerCamelCase, axis=1 )
        # 1. time
        if not isinstance(lowerCamelCase, jnp.ndarray ):
            lowerCamelCase__ : Any =jnp.array([timesteps], dtype=jnp.intaa )
        elif isinstance(lowerCamelCase, jnp.ndarray ) and len(timesteps.shape ) == 0:
            lowerCamelCase__ : List[str] =timesteps.astype(dtype=jnp.floataa )
            lowerCamelCase__ : int =jnp.expand_dims(lowerCamelCase, 0 )
        lowerCamelCase__ : Optional[Any] =self.time_proj(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =self.time_embedding(lowerCamelCase )
        # 2. pre-process (NCHW -> NHWC for Flax convs)
        lowerCamelCase__ : Optional[int] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) )
        lowerCamelCase__ : Dict =self.conv_in(lowerCamelCase )
        lowerCamelCase__ : List[str] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) )
        lowerCamelCase__ : int =self.controlnet_cond_embedding(lowerCamelCase )
        sample += controlnet_cond
        # 3. down
        lowerCamelCase__ : Union[str, Any] =(sample,)
        for down_block in self.down_blocks:
            if isinstance(lowerCamelCase, lowerCamelCase ):
                lowerCamelCase__ , lowerCamelCase__ : Dict =down_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train )
            else:
                lowerCamelCase__ , lowerCamelCase__ : Tuple =down_block(lowerCamelCase, lowerCamelCase, deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        lowerCamelCase__ : Optional[int] =self.mid_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train )
        # 5. controlnet blocks (zero-initialised 1x1 convs per residual)
        lowerCamelCase__ : Optional[Any] =()
        for down_block_res_sample, controlnet_block in zip(lowerCamelCase, self.controlnet_down_blocks ):
            lowerCamelCase__ : Union[str, Any] =controlnet_block(lowerCamelCase )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        lowerCamelCase__ : List[str] =controlnet_down_block_res_samples
        lowerCamelCase__ : List[str] =self.controlnet_mid_block(lowerCamelCase )
        # 6. scaling
        lowerCamelCase__ : Union[str, Any] =[sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=lowerCamelCase, mid_block_res_sample=lowerCamelCase )
| 625 | 1 |
"""simple docstring"""
def snake_case__ ( text1 : str , text2 : str ):
    """Return the longest substring common to ``text1`` and ``text2``.

    Classic O(len(text1) * len(text2)) dynamic programme where ``dp[i][j]``
    is the length of the longest common suffix of ``text1[:i]`` and
    ``text2[:j]``; the best (end, length) pair seen is tracked as we go.

    Note: the previous version declared both parameters as
    ``__lowerCamelCase`` — duplicate argument names are a SyntaxError — and
    its mangled locals built the dp table from one string's length twice and
    compared the same string against itself; this version restores the
    standard algorithm with the original interface and error message.

    Args:
        text1: first string (the returned substring is taken from it).
        text2: second string.

    Returns:
        The first longest common substring found; "" when there is none.

    Raises:
        ValueError: if either argument is not a string.
    """
    if not (isinstance(text1 , str ) and isinstance(text2 , str )):
        raise ValueError('''longest_common_substring() takes two strings for inputs''' )
    text1_length = len(text1 )
    text2_length = len(text2 )
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1 )]
    ans_index = 0
    ans_length = 0
    for i in range(1 , text1_length + 1 ):
        for j in range(1 , text2_length + 1 ):
            if text1[i - 1] == text2[j - 1]:
                # Extend the common suffix ending at (i, j).
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    # Remember where the best common substring ends in text1.
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 625 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Map of submodule name -> list of public symbols. _LazyModule consumes this so
# that heavy framework-specific modules are only imported on first access.
# Fix: the original bound every structure to the throwaway name `_lowercase`,
# so `_import_structure` (used by _LazyModule below) was never defined
# (NameError) and the optional backends were never registered.
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

# Fast tokenizer requires the `tokenizers` backend.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

# Image processing requires the vision backend.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

# Flax models.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the lazy module is used.
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy so the submodules above are
    # imported only when their attributes are first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401


# Emit a deprecation warning at import time pointing callers at the new import
# path. standard_warn=False keeps the message verbatim; stacklevel=3 attributes
# the warning to the importing module rather than this shim.
deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
| 625 |
"""simple docstring"""
import os
def solution() -> int:
    """Project Euler 22: total of all name scores in ``p022_names.txt``.

    A name's score is the sum of its letters' alphabet positions (A=1, since
    the file is upper-case and ``ord('A') == 65``) multiplied by the name's
    1-based rank in the alphabetically sorted list.
    """
    # Fix: the path was built from an undefined name; the data file lives next
    # to this script, so anchor it at __file__. The entry-point call below is
    # also restored to match this function's name.
    with open(os.path.dirname(__file__) + '''/p022_names.txt''') as file:
        names = str(file.readlines()[0])
        names = names.replace('''"''', '''''').split(''',''')

    names.sort()

    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0  # reset the per-name accumulator for the next name
    return total_score


if __name__ == "__main__":
    print(solution())
| 625 | 1 |
"""simple docstring"""
import os
# Roman symbol -> value table used by the parser below.
# Fix: the dict was bound to `_lowercase` while the code reads `SYMBOLS`.
SYMBOLS = {"I": 1, "V": 5, "X": 1_0, "L": 5_0, "C": 1_0_0, "D": 5_0_0, "M": 1_0_0_0}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a roman numeral string to its integer value.

    A symbol smaller than its right neighbour is subtracted (IV -> 4),
    otherwise added. Assumes a non-empty, well-formed numeral.
    """
    # Fix: the def was anonymized while the caller below in this module uses
    # `parse_roman_numerals`, and the parameter/body names were inconsistent.
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value
def generate_roman_numerals(num: int) -> str:
    """Return the minimal roman-numeral spelling of a non-negative integer.

    Works digit-group by digit-group (thousands, hundreds, tens, units),
    emitting the subtractive forms (CM, CD, XC, XL, IX, IV) where shorter.
    """
    # Fix: the parameter was anonymized while the body reads `num`; the def is
    # renamed to match its call site later in this module.
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: characters saved by minimizing roman numerals.

    Reads one numeral per line from *roman_numerals_filename* (resolved
    relative to this script's directory), re-encodes each in minimal form,
    and returns the total number of characters saved.
    """
    # Fix: the parameter was anonymized while the body reads
    # `roman_numerals_filename`, and the base path used an undefined name
    # instead of __file__.
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)

    return savings


if __name__ == "__main__":
    print(f'{solution() = }')
| 625 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class __SCREAMING_SNAKE_CASE:
    """A binary-tree node: an integer payload plus optional left/right children."""

    def __init__(self, value: int) -> None:
        # Fix: the attribute assignments were mangled to a dummy local, so the
        # node never actually stored anything; restored self.value/left/right.
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class __SCREAMING_SNAKE_CASE:
    """Sum every value in a binary tree via recursive depth-first search.

    Iterating an instance yields a single value: the total of the tree.
    Nodes must expose ``value``, ``left`` and ``right`` attributes.
    """

    def __init__(self, tree: Node) -> None:
        # Fix: the assignment target was mangled; __iter__ reads self.tree.
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        """Return the sum of *node*'s subtree (0 for an empty subtree)."""
        # Fix: the method was defined under an anonymized name while both the
        # recursion and __iter__ call it as `depth_first_search`.
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 625 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Map of submodule name -> list of public symbols. _LazyModule consumes this so
# that heavy framework-specific modules are only imported on first access.
# Fix: the original bound every structure to the throwaway name `_lowercase`,
# so `_import_structure` (used by _LazyModule below) was never defined
# (NameError) and the optional backends were never registered.
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

# Fast tokenizer requires the `tokenizers` backend.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

# Image processing requires the vision backend.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

# Flax models.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the lazy module is used.
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy so the submodules above are
    # imported only when their attributes are first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 625 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
_lowercase : List[str] = logging.getLogger(__name__)
def save_model(model, dirpath):
    """Save *model* into *dirpath*, replacing any previous checkpoint.

    If the directory already exists, stale `config.json` / `pytorch_model.bin`
    files are removed first; otherwise the directory is created.
    """
    # Fix: both parameters were declared with the same name (a SyntaxError);
    # the def is renamed to match its call site (`save_model`) below.
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, '''config.json''')) and os.path.isfile(
            os.path.join(dirpath, '''config.json''')
        ):
            os.remove(os.path.join(dirpath, '''config.json'''))
        if os.path.exists(os.path.join(dirpath, '''pytorch_model.bin''')) and os.path.isfile(
            os.path.join(dirpath, '''pytorch_model.bin''')
        ):
            os.remove(os.path.join(dirpath, '''pytorch_model.bin'''))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the Shannon entropy (natural log) of *p* along the last axis.

    Args:
        p: tensor of probabilities (or attention scores when ``unlogit``).
        unlogit: if True, square ``p`` first (used on attention matrices).
    """
    # Fix: duplicate parameter names (SyntaxError) and the mangled-away
    # `plogp[p == 0] = 0` masking, without which any zero probability turned
    # the whole sum into NaN. Renamed to match the `entropy(...)` call site.
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # define 0 * log(0) as 0 instead of NaN
    return -plogp.sum(dim=-1)
def print_ad_tensor(tensor):
    """Log a 2D (layers x heads) tensor, one tab-separated row per layer.

    Integer tensors are printed as ints, everything else with 5 decimals.
    """
    # Fix: the parameter was anonymized while the body reads `tensor`; the def
    # is renamed to match its call sites (`print_ad_tensor`) in this module.
    logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data))
        else:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Run *model* over *eval_dataloader* and accumulate per-attention-head
    statistics: attention entropy and head-mask gradient magnitude (importance).

    Returns:
        (attn_entropy, head_importance, total_loss)
    """
    # Fix: all seven parameters were collapsed into one duplicate name (a
    # SyntaxError) and every assignment target was mangled; reconstructed from
    # the variable uses and the upstream bertology script.
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc='''Iteration''', disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-2_0

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info('''Attention entropies''')
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info('''Head importance scores''')
        print_ad_tensor(head_importance)
    logger.info('''Head ranked by importance scores''')
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    # NOTE(review): rank heads from most to least important, as in the upstream
    # bertology script (the mangled original lost this indexed assignment).
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively zero out the least-important attention heads.

    Heads are masked in chunks of ``args.masking_amount`` until the LM score
    (1 / loss) drops below ``args.masking_threshold`` times the unmasked score.

    Returns:
        The last head mask whose score was still above the threshold (also
        saved to ``head_mask.npy`` in ``args.output_dir``).
    """
    # Fix: duplicate parameter names (SyntaxError) and mangled assignment
    # targets (e.g. the `head_importance[...] = Inf` masking of already-zeroed
    # heads was lost); reconstructed from the uses below.
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info('''Pruning: original score: %f, threshold: %f''', original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('''Inf''')
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print('''BREAK BY num_to_mask''')
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('''Heads to mask: %s''', str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            '''Masking: current score: %f, remaining heads %d (%.1f percents)''', current_score, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 100, )

    logger.info('''Final head mask''')
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, '''head_mask.npy'''), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the heads zeroed in *head_mask* and report the effect.

    Pruning is like masking but the masked weights are really dropped, so the
    pruned model is smaller and faster; score and timing are compared between
    the masked and the pruned model, and the pruned model is saved.
    """
    # Fix: duplicate parameter names (SyntaxError) and mangled assignment /
    # isinstance targets; reconstructed from the uses below.
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            # squeeze() collapsed a single-head layer to a bare int; re-wrap it
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True, )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''', original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100, )
    logger.info('''Pruning: score with masking: %f score with pruning: %f''', score_masking, score_pruning)
    logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''', original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    """CLI entry point: load a GPT-2 LM, compute per-head entropy/importance on
    a dataset of token ids, and optionally mask then prune attention heads."""
    # Fix: every add_argument default/type/required literal, plus the bindings
    # of args, device, model and dataloader, had been mangled to an undefined
    # name; reconstructed from the uses and the upstream bertology script. The
    # def is renamed to match the `main()` call in the __main__ guard.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--data_dir''', default=None, type=str, required=True, help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''', )
    parser.add_argument(
        '''--model_name_or_path''', default=None, type=str, required=True, help='''Path to pretrained model or model identifier from huggingface.co/models''', )
    parser.add_argument(
        '''--output_dir''', default=None, type=str, required=True, help='''The output directory where the model predictions and checkpoints will be written.''', )
    # Other parameters
    parser.add_argument(
        '''--config_name''', default='''''', type=str, help='''Pretrained config name or path if not the same as model_name_or_path''', )
    parser.add_argument(
        '''--tokenizer_name''', default='''''', type=str, help='''Pretrained tokenizer name or path if not the same as model_name_or_path''', )
    parser.add_argument(
        '''--cache_dir''', default=None, type=str, help='''Where do you want to store the pre-trained models downloaded from s3''', )
    parser.add_argument(
        '''--data_subset''', type=int, default=-1, help='''If > 0: limit the data to a subset of data_subset instances.''')
    parser.add_argument(
        '''--overwrite_output_dir''', action='''store_true''', help='''Whether to overwrite data in output directory''')
    parser.add_argument(
        '''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
    parser.add_argument(
        '''--dont_normalize_importance_by_layer''', action='''store_true''', help='''Don\'t normalize importance score by layers''')
    parser.add_argument(
        '''--dont_normalize_global_importance''', action='''store_true''', help='''Don\'t normalize all importance scores between 0 and 1''', )
    parser.add_argument(
        '''--try_masking''', action='''store_true''', help='''Whether to try to mask head until a threshold of accuracy.''')
    parser.add_argument(
        '''--masking_threshold''', default=0.9, type=float, help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''', )
    parser.add_argument(
        '''--masking_amount''', default=0.1, type=float, help='''Amount to heads to masking at each masking step.''')
    parser.add_argument('''--metric_name''', default='''acc''', type=str, help='''Metric to use for head masking.''')
    parser.add_argument(
        '''--max_seq_length''', default=128, type=int, help=(
            '''The maximum total input sequence length after WordPiece tokenization. \n'''
            '''Sequences longer than this will be truncated, sequences shorter padded.'''
        ), )
    parser.add_argument('''--batch_size''', default=1, type=int, help='''Batch size.''')
    parser.add_argument('''--seed''', type=int, default=42)
    parser.add_argument('''--local_rank''', type=int, default=-1, help='''local_rank for distributed training on gpus''')
    parser.add_argument('''--no_cuda''', action='''store_true''', help='''Whether not to use CUDA when available''')
    parser.add_argument('''--server_ip''', type=str, default='''''', help='''Can be used for distant debugging.''')
    parser.add_argument('''--server_port''', type=str, default='''''', help='''Can be used for distant debugging.''')
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('''Waiting for debugger attach''')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''')
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device('''cuda''', args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='''nccl''')  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, '''run_args.bin'''))
    logger.info('''Training/evaluation parameters %s''', args)

    # Prepare dataset
    # NOTE(review): the data file is expected to hold whitespace-separated
    # token ids; np.int64 restores the dtype (`np.intaa` does not exist).
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
| 625 | 1 |
"""simple docstring"""
class __SCREAMING_SNAKE_CASE:
    """A node of a doubly linked list: a payload plus links to its previous and
    next neighbours (None at either end)."""

    def __init__(self, data: int, previous=None, next_node=None) -> None:
        # Fix: the attribute assignments were mangled to dummy locals, so the
        # node never stored anything; restored data/previous/next.
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f'''{self.data}'''

    def get_data(self) -> int:
        """Return the stored payload."""
        # Fix: the three accessors were all defined under the same anonymized
        # name (each shadowing the last) while the linked-list code calls them
        # as get_data / get_next / get_previous.
        return self.data

    def get_next(self):
        """Return the next node, or None at the tail."""
        return self.next

    def get_previous(self):
        """Return the previous node, or None at the head."""
        return self.previous
class __SCREAMING_SNAKE_CASE:
    """Forward iterator over a doubly linked list, yielding node data from the
    given head node to the tail."""

    def __init__(self, head) -> None:
        # Fix: the assignment target was mangled; __next__ reads self.current.
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        # Fix: this was defined under an anonymized plain-method name, so the
        # class never satisfied the iterator protocol it advertises above.
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class __SCREAMING_SNAKE_CASE:
    """A doubly linked list.

    Nodes are expected to expose ``data``/``previous``/``next`` attributes plus
    ``get_data``/``get_next``/``get_previous`` accessors (see the node class
    defined earlier in this module). Fix: most method names were anonymized to
    the same identifier (shadowing each other) while the internal calls use the
    real names (set_head, insert_before_node, get_node, ...), and every
    attribute assignment was mangled; both restored here.
    """

    def __init__(self) -> None:
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self) -> str:
        # Space-separated data values from head to tail.
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        # NOTE(review): `LinkedListIterator` must be the iterator class defined
        # earlier in this module; in this file that class is still bound to an
        # obfuscated name, so this reference needs the class rename to resolve.
        return LinkedListIterator(self.head)

    def get_head_data(self):
        """Return the data of the first node, or None for an empty list."""
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        """Return the data of the last node, or None for an empty list."""
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node) -> None:
        """Make *node* the new head (and tail, if the list was empty)."""
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node) -> None:
        """Append *node* at the tail (delegates to set_head when empty)."""
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        """Wrap *value* in a node and append it to the list."""
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node, node_to_insert) -> None:
        # Splice node_to_insert immediately before node.
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node, node_to_insert) -> None:
        # Splice node_to_insert immediately after node.
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        """Insert *value* so it ends up at 1-based *position*; append when the
        position is past the end of the list."""
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int):
        """Return the first node whose data equals *item*.

        Raises:
            Exception: if no node holds *item*.
        """
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception('''Node not found''')

    def delete_value(self, value):
        """Unlink the first node holding *value*, fixing head/tail pointers."""
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node) -> None:
        """Detach *node* from its neighbours and clear its own links."""
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None
def snake_case__ ( ):
    """Placeholder entry point; intentionally a no-op (returns None)."""
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 625 |
"""simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Tuple ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =AutoConfig.from_pretrained(__lowerCamelCase )
lowerCamelCase__ : Any =FlaxAutoModelForSeqaSeqLM.from_config(config=__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =checkpoints.load_tax_checkpoint(__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] ='''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
if config.model_type == "t5":
lowerCamelCase__ : List[str] ='''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowerCamelCase__ : List[Any] ='''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase__ : Optional[Any] ='''TransientGlobalSelfAttention'''
else:
raise ValueError(
'''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
''' attribute with a value from [\'local\', \'transient-global].''' )
# Encoder
for layer_index in range(config.num_layers ):
lowerCamelCase__ : List[Any] =f'''layers_{str(__lowerCamelCase )}'''
# Self-Attention
lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
lowerCamelCase__ : Optional[int] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
lowerCamelCase__ : List[Any] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase__ : str =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
lowerCamelCase__ : List[Any] =tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
lowerCamelCase__ : Optional[Any] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
lowerCamelCase__ : Dict =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
lowerCamelCase__ : Tuple =tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
lowerCamelCase__ : str =flax_model.params['''encoder''']['''block'''][str(__lowerCamelCase )]['''layer''']
lowerCamelCase__ : int =tax_attention_key
lowerCamelCase__ : Optional[int] =tax_attention_out
lowerCamelCase__ : List[Any] =tax_attention_query
lowerCamelCase__ : Optional[Any] =tax_attention_value
lowerCamelCase__ : List[str] =tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase__ : Optional[int] =tax_global_layer_norm
if split_mlp_wi:
lowerCamelCase__ : Optional[int] =tax_mlp_wi_a
lowerCamelCase__ : Optional[int] =tax_mlp_wi_a
else:
lowerCamelCase__ : Union[str, Any] =tax_mlp_wi
lowerCamelCase__ : str =tax_mlp_wo
lowerCamelCase__ : Optional[Any] =tax_mlp_layer_norm
lowerCamelCase__ : Optional[int] =flax_model_encoder_layer_block
# Only for layer 0:
lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
lowerCamelCase__ : str =tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase__ : Optional[int] =tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
lowerCamelCase__ : Optional[int] =tax_encoder_global_rel_embedding
# Assigning
lowerCamelCase__ : int =tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
lowerCamelCase__ : List[Any] =tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
lowerCamelCase__ : Dict =f'''layers_{str(__lowerCamelCase )}'''
# Self-Attention
lowerCamelCase__ : Dict =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
lowerCamelCase__ : List[Any] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
lowerCamelCase__ : Optional[int] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
lowerCamelCase__ : int =tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
lowerCamelCase__ : int =tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
lowerCamelCase__ : List[Any] =tax_enc_dec_attention_module['''key''']['''kernel''']
lowerCamelCase__ : Any =tax_enc_dec_attention_module['''out''']['''kernel''']
lowerCamelCase__ : Dict =tax_enc_dec_attention_module['''query''']['''kernel''']
lowerCamelCase__ : List[str] =tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
lowerCamelCase__ : Dict =tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
lowerCamelCase__ : str =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
lowerCamelCase__ : Any =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
lowerCamelCase__ : List[Any] =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
lowerCamelCase__ : Optional[Any] =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
lowerCamelCase__ : str =tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
lowerCamelCase__ : str =flax_model.params['''decoder''']['''block'''][str(__lowerCamelCase )]['''layer''']
lowerCamelCase__ : Union[str, Any] =tax_attention_key
lowerCamelCase__ : str =tax_attention_out
lowerCamelCase__ : Optional[int] =tax_attention_query
lowerCamelCase__ : Dict =tax_attention_value
lowerCamelCase__ : List[str] =tax_pre_attention_layer_norm
lowerCamelCase__ : List[Any] =tax_enc_dec_attention_key
lowerCamelCase__ : Any =tax_enc_dec_attention_out
lowerCamelCase__ : Any =tax_enc_dec_attention_query
lowerCamelCase__ : Optional[int] =tax_enc_dec_attention_value
lowerCamelCase__ : Dict =tax_cross_layer_norm
if split_mlp_wi:
lowerCamelCase__ : Tuple =tax_mlp_wi_a
lowerCamelCase__ : int =tax_mlp_wi_a
else:
lowerCamelCase__ : List[Any] =tax_mlp_wi
lowerCamelCase__ : Dict =tax_mlp_wo
lowerCamelCase__ : Tuple =txa_mlp_layer_norm
lowerCamelCase__ : Optional[Any] =flax_model_decoder_layer_block
# Decoder Normalization
lowerCamelCase__ : Dict =tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
lowerCamelCase__ : int =txa_decoder_norm
# Only for layer 0:
lowerCamelCase__ : Tuple =tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
lowerCamelCase__ : Tuple =tax_decoder_rel_embedding
# Token Embeddings
lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''token_embedder''']['''embedding''']
lowerCamelCase__ : Dict =txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowerCamelCase__ : int =tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
flax_model.save_pretrained(__lowerCamelCase )
print('''T5X Model was sucessfully converted!''' )
if __name__ == "__main__":
    # CLI entry point for the T5X -> Flax conversion above.
    # FIX(review): the obfuscation pass had bound the parser and parsed args to
    # ``_lowercase`` while later lines read ``parser``/``args`` (NameError),
    # read ``args.tax_checkpoint_path`` although the flag is
    # ``--t5x_checkpoint_path`` (argparse derives the attribute name from the
    # option string), and called ``convert_tax_checkpoint_to_flax`` which does
    # not exist in this file — the conversion routine is (obfuscated as)
    # ``snake_case__`` above.  All four references are repaired here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    snake_case__(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 625 | 1 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    '''Fixture builder for DeBERTa-v2 tests: constructs configs, random input
    tensors, and per-head ``create_and_check_*`` routines.

    NOTE(review): heavily obfuscated.  Every helper method below is named
    ``snake_case`` (only the last definition survives on the class), most
    signatures declare several parameters with the *same* name
    (``lowerCamelCase`` — a SyntaxError in Python), and every local/attribute
    write goes to the throwaway ``lowerCamelCase__`` while later lines read
    the intended names (``self.batch_size``, ``input_ids``, ``result`` ...).
    Code is left byte-identical; comments only.
    '''
    # (apparent) __init__: should record each hyper-parameter on ``self``,
    # but every write is lost to the throwaway local.
    def __init__( self : Any, lowerCamelCase : Optional[Any], lowerCamelCase : Optional[int]=13, lowerCamelCase : str=7, lowerCamelCase : Dict=True, lowerCamelCase : int=True, lowerCamelCase : Dict=True, lowerCamelCase : int=True, lowerCamelCase : Optional[int]=99, lowerCamelCase : Dict=32, lowerCamelCase : Tuple=5, lowerCamelCase : List[Any]=4, lowerCamelCase : int=37, lowerCamelCase : Optional[Any]="gelu", lowerCamelCase : int=0.1, lowerCamelCase : Any=0.1, lowerCamelCase : Dict=512, lowerCamelCase : str=16, lowerCamelCase : Dict=2, lowerCamelCase : Union[str, Any]=0.02, lowerCamelCase : Tuple=False, lowerCamelCase : Dict=True, lowerCamelCase : Optional[int]="None", lowerCamelCase : Union[str, Any]=3, lowerCamelCase : Optional[int]=4, lowerCamelCase : Optional[int]=None, )-> List[Any]:
        lowerCamelCase__ : Union[str, Any] =parent
        lowerCamelCase__ : Union[str, Any] =batch_size
        lowerCamelCase__ : Optional[int] =seq_length
        lowerCamelCase__ : int =is_training
        lowerCamelCase__ : str =use_input_mask
        lowerCamelCase__ : int =use_token_type_ids
        lowerCamelCase__ : Optional[int] =use_labels
        lowerCamelCase__ : Optional[Any] =vocab_size
        lowerCamelCase__ : Tuple =hidden_size
        lowerCamelCase__ : Any =num_hidden_layers
        lowerCamelCase__ : int =num_attention_heads
        lowerCamelCase__ : List[str] =intermediate_size
        lowerCamelCase__ : List[str] =hidden_act
        lowerCamelCase__ : Any =hidden_dropout_prob
        lowerCamelCase__ : int =attention_probs_dropout_prob
        lowerCamelCase__ : str =max_position_embeddings
        lowerCamelCase__ : str =type_vocab_size
        lowerCamelCase__ : List[str] =type_sequence_label_size
        lowerCamelCase__ : List[Any] =initializer_range
        lowerCamelCase__ : List[Any] =num_labels
        lowerCamelCase__ : List[Any] =num_choices
        lowerCamelCase__ : List[str] =relative_attention
        lowerCamelCase__ : str =position_biased_input
        lowerCamelCase__ : Optional[int] =pos_att_type
        lowerCamelCase__ : str =scope
    # (apparent) prepare_config_and_inputs: random ids/masks/labels + config.
    def snake_case ( self : str )-> int:
        lowerCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        lowerCamelCase__ : Tuple =None
        if self.use_input_mask:
            lowerCamelCase__ : Optional[Any] =ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
        lowerCamelCase__ : int =None
        if self.use_token_type_ids:
            lowerCamelCase__ : List[str] =ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        lowerCamelCase__ : Union[str, Any] =None
        lowerCamelCase__ : str =None
        lowerCamelCase__ : int =None
        if self.use_labels:
            lowerCamelCase__ : str =ids_tensor([self.batch_size], self.type_sequence_label_size )
            lowerCamelCase__ : Dict =ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            lowerCamelCase__ : Any =ids_tensor([self.batch_size], self.num_choices )
        lowerCamelCase__ : Optional[int] =self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    # (apparent) get_config.
    def snake_case ( self : Any )-> Any:
        return DebertaVaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )
    # (apparent) check_loss_output: scalar-loss shape assertion.
    def snake_case ( self : int, lowerCamelCase : Dict )-> Optional[int]:
        self.parent.assertListEqual(list(result.loss.size() ), [] )
    # (apparent) create_and_check_deberta_model.
    def snake_case ( self : Any, lowerCamelCase : int, lowerCamelCase : str, lowerCamelCase : Any, lowerCamelCase : int, lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Any )-> List[str]:
        lowerCamelCase__ : Dict =DebertaVaModel(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        lowerCamelCase__ : List[str] =model(lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase )[0]
        lowerCamelCase__ : int =model(lowerCamelCase, token_type_ids=lowerCamelCase )[0]
        lowerCamelCase__ : List[str] =model(lowerCamelCase )[0]
        self.parent.assertListEqual(list(sequence_output.size() ), [self.batch_size, self.seq_length, self.hidden_size] )
    # (apparent) create_and_check_deberta_for_masked_lm.
    def snake_case ( self : List[Any], lowerCamelCase : Any, lowerCamelCase : List[str], lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : str, lowerCamelCase : Any, lowerCamelCase : Optional[int] )-> List[Any]:
        lowerCamelCase__ : Optional[int] =DebertaVaForMaskedLM(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        lowerCamelCase__ : Any =model(lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    # (apparent) create_and_check_deberta_for_sequence_classification.
    def snake_case ( self : Union[str, Any], lowerCamelCase : str, lowerCamelCase : Union[str, Any], lowerCamelCase : int, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[int], lowerCamelCase : List[str], lowerCamelCase : Union[str, Any] )-> List[Any]:
        lowerCamelCase__ : int =self.num_labels
        lowerCamelCase__ : Dict =DebertaVaForSequenceClassification(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        lowerCamelCase__ : Tuple =model(lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase )
        self.parent.assertListEqual(list(result.logits.size() ), [self.batch_size, self.num_labels] )
        self.check_loss_output(lowerCamelCase )
    # (apparent) create_and_check_deberta_for_token_classification.
    def snake_case ( self : Tuple, lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : Any, lowerCamelCase : Union[str, Any], lowerCamelCase : Tuple, lowerCamelCase : Tuple, lowerCamelCase : int )-> int:
        lowerCamelCase__ : int =self.num_labels
        lowerCamelCase__ : List[str] =DebertaVaForTokenClassification(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
    # (apparent) create_and_check_deberta_for_question_answering.
    def snake_case ( self : str, lowerCamelCase : Any, lowerCamelCase : List[Any], lowerCamelCase : List[Any], lowerCamelCase : Tuple, lowerCamelCase : List[Any], lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[int] )-> Dict:
        lowerCamelCase__ : Union[str, Any] =DebertaVaForQuestionAnswering(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        lowerCamelCase__ : Tuple =model(
            lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
    # (apparent) create_and_check_deberta_for_multiple_choice: inputs are
    # expanded along a num_choices dimension before the forward pass.
    def snake_case ( self : Tuple, lowerCamelCase : List[str], lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : Any, lowerCamelCase : List[Any], lowerCamelCase : Dict, lowerCamelCase : Dict )-> Dict:
        lowerCamelCase__ : Optional[int] =DebertaVaForMultipleChoice(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        lowerCamelCase__ : Tuple =input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        lowerCamelCase__ : Dict =token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        lowerCamelCase__ : Any =input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        lowerCamelCase__ : int =model(
            lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
    # (apparent) prepare_config_and_inputs_for_common: repacks the fixture
    # tuple into the kwargs dict used by the shared mixins.
    def snake_case ( self : Any )-> List[Any]:
        lowerCamelCase__ : str =self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) : Optional[int] =config_and_inputs
        lowerCamelCase__ : List[Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    '''Harness wiring the DeBERTa-v2 heads into the shared ModelTesterMixin /
    PipelineTesterMixin suites.

    NOTE(review): obfuscated — every class attribute is assigned to ``_a``
    (later assignments overwrite earlier ones; the originals were presumably
    ``all_model_classes``, ``pipeline_model_mapping`` and boolean feature
    flags), every test method is named ``snake_case`` (only the last survives),
    and the setUp-like method binds its fixtures to a throwaway local while
    later tests read ``self.model_tester`` / ``self.config_tester``.
    ``DebertaVaModelTester`` is not defined under that name in this chunk.
    Code left byte-identical; comments only.
    '''
    _a = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    _a = (
        {
            'feature-extraction': DebertaVaModel,
            'fill-mask': DebertaVaForMaskedLM,
            'question-answering': DebertaVaForQuestionAnswering,
            'text-classification': DebertaVaForSequenceClassification,
            'token-classification': DebertaVaForTokenClassification,
            'zero-shot': DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _a = True
    _a = False
    _a = False
    _a = False
    _a = False
    # (apparent) setUp.
    def snake_case ( self : List[Any] )-> int:
        lowerCamelCase__ : Optional[int] =DebertaVaModelTester(self )
        lowerCamelCase__ : Any =ConfigTester(self, config_class=lowerCamelCase, hidden_size=37 )
    # (apparent) test_config.
    def snake_case ( self : Any )-> Dict:
        self.config_tester.run_common_tests()
    # (apparent) test_deberta_model.
    def snake_case ( self : List[str] )-> int:
        lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*lowerCamelCase )
    # (apparent) test_for_sequence_classification.
    def snake_case ( self : Optional[Any] )-> Any:
        lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCamelCase )
    # (apparent) test_for_masked_lm.
    def snake_case ( self : Dict )-> Optional[int]:
        lowerCamelCase__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCamelCase )
    # (apparent) test_for_question_answering.
    def snake_case ( self : Any )-> Dict:
        lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*lowerCamelCase )
    # (apparent) test_for_token_classification.
    def snake_case ( self : Optional[int] )-> str:
        lowerCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*lowerCamelCase )
    # (apparent) test_for_multiple_choice.
    def snake_case ( self : str )-> str:
        lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCamelCase )
    # (apparent) test_model_from_pretrained — slow: downloads a checkpoint.
    @slow
    def snake_case ( self : List[str] )-> Optional[int]:
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : Optional[Any] =DebertaVaModel.from_pretrained(lowerCamelCase )
            self.assertIsNotNone(lowerCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration checks for DeBERTa-v2 against pinned reference values.

    NOTE(review): reconstructed from an obfuscated block — both test methods
    were named ``snake_case`` (the second shadowed the first) and the slow
    test bound its locals to a throwaway name while reading ``model``,
    ``output`` and an unbound ``lowerCamelCase``.  Method/variable names are
    restored so the surviving reads resolve; tensor literals, checkpoint name
    and tolerances are unchanged.
    """

    @unittest.skip(reason='''Model not available yet''' )
    def test_inference_masked_lm( self : Optional[Any] )-> Optional[Any]:
        # Placeholder until the reference checkpoint for this head exists.
        pass

    @slow
    def test_inference_no_head( self : Dict )-> Any:
        model = DebertaVaModel.from_pretrained('''microsoft/deberta-v2-xlarge''' )
        input_ids = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask )[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4 ), F'''{output[:, 1:4, 1:4]}''' )
| 625 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
    '''Fixture builder for UperNet tests: a ConvNext backbone config, a
    UperNetConfig on top of it, random pixel inputs, and a forward-pass check.

    NOTE(review): obfuscated — helper methods are named ``snake_case`` (later
    definitions shadow earlier ones) and every attribute/local write goes to
    the throwaway ``lowerCamelCase__`` while later lines read the intended
    names (``self.batch_size`` ...).  The harness below refers to this class
    as ``UperNetModelTester``, a binding that does not exist in this chunk.
    Code left byte-identical; comments only.
    '''
    # (apparent) __init__: should record hyper-parameters on ``self``; all
    # writes lost to the throwaway local.
    def __init__( self : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : List[str]=13, lowerCamelCase : List[Any]=32, lowerCamelCase : Dict=3, lowerCamelCase : int=4, lowerCamelCase : str=[10, 20, 30, 40], lowerCamelCase : Any=[2, 2, 3, 2], lowerCamelCase : int=True, lowerCamelCase : int=True, lowerCamelCase : str=37, lowerCamelCase : Optional[int]="gelu", lowerCamelCase : Optional[int]=10, lowerCamelCase : Any=0.02, lowerCamelCase : Union[str, Any]=["stage2", "stage3", "stage4"], lowerCamelCase : Optional[int]=3, lowerCamelCase : Tuple=None, )-> List[str]:
        lowerCamelCase__ : List[str] =parent
        lowerCamelCase__ : Tuple =batch_size
        lowerCamelCase__ : str =image_size
        lowerCamelCase__ : Any =num_channels
        lowerCamelCase__ : Tuple =num_stages
        lowerCamelCase__ : List[str] =hidden_sizes
        lowerCamelCase__ : Any =depths
        lowerCamelCase__ : Union[str, Any] =is_training
        lowerCamelCase__ : Tuple =use_labels
        lowerCamelCase__ : int =intermediate_size
        lowerCamelCase__ : Optional[int] =hidden_act
        lowerCamelCase__ : Dict =type_sequence_label_size
        lowerCamelCase__ : Tuple =initializer_range
        lowerCamelCase__ : Any =out_features
        lowerCamelCase__ : Tuple =num_labels
        lowerCamelCase__ : Optional[int] =scope
        lowerCamelCase__ : Optional[int] =num_stages
    # (apparent) prepare_config_and_inputs: random pixel values (+ labels).
    def snake_case ( self : str )-> Optional[int]:
        lowerCamelCase__ : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase__ : Tuple =None
        if self.use_labels:
            lowerCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size], self.type_sequence_label_size )
        lowerCamelCase__ : int =self.get_config()
        return config, pixel_values, labels
    # (apparent) get_backbone_config: the ConvNext backbone configuration.
    def snake_case ( self : Union[str, Any] )-> Any:
        return ConvNextConfig(
            num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
    # (apparent) get_config: the full UperNet configuration.
    def snake_case ( self : Union[str, Any] )-> Any:
        return UperNetConfig(
            backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=lowerCamelCase, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=lowerCamelCase, loss_ignore_index=255, num_labels=self.num_labels, )
    # (apparent) create_and_check_for_semantic_segmentation.
    def snake_case ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : List[Any] )-> Tuple:
        lowerCamelCase__ : List[str] =UperNetForSemanticSegmentation(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        lowerCamelCase__ : int =model(lowerCamelCase )
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    # (apparent) prepare_config_and_inputs_for_common.
    def snake_case ( self : Any )-> Tuple:
        lowerCamelCase__ : Dict =self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) : Any =config_and_inputs
        lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    '''Harness wiring UperNetForSemanticSegmentation into the shared
    ModelTesterMixin / PipelineTesterMixin suites.

    NOTE(review): obfuscated — class attributes are all assigned to ``_a``
    (later writes overwrite earlier ones), every test method is named
    ``snake_case`` (only the last survives), and the setUp-like method binds
    its fixtures to a throwaway local while later tests read
    ``self.model_tester`` / ``self.config_tester``.  ``UperNetModelTester``
    is not defined under that name in this chunk.  Code left byte-identical;
    comments only.
    '''
    _a = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    _a = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
    _a = False
    _a = False
    _a = False
    _a = False
    _a = False
    _a = False
    # (apparent) setUp.
    def snake_case ( self : Optional[int] )-> Optional[int]:
        lowerCamelCase__ : Optional[Any] =UperNetModelTester(self )
        lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 )
    # (apparent) test_config: runs the standard config round-trip checks.
    def snake_case ( self : Optional[int] )-> Optional[int]:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    # (apparent) create_and_test_config_common_properties: intentionally a no-op.
    def snake_case ( self : List[str] )-> Dict:
        return
    # (apparent) test_forward_signature: checks forward() starts with pixel_values.
    def snake_case ( self : Optional[int] )-> List[str]:
        lowerCamelCase__ , lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
            lowerCamelCase__ : Tuple =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Tuple =[*signature.parameters.keys()]
            lowerCamelCase__ : List[Any] =['''pixel_values''']
            self.assertListEqual(arg_names[:1], lowerCamelCase )
    # (apparent) test_for_semantic_segmentation.
    def snake_case ( self : Any )-> Union[str, Any]:
        lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase )
    @unittest.skip(reason='''UperNet does not use inputs_embeds''' )
    def snake_case ( self : Optional[Any] )-> List[Any]:
        pass
    @unittest.skip(reason='''UperNet does not support input and output embeddings''' )
    def snake_case ( self : Any )-> List[str]:
        pass
    @unittest.skip(reason='''UperNet does not have a base model''' )
    def snake_case ( self : int )-> Any:
        pass
    @unittest.skip(reason='''UperNet does not have a base model''' )
    def snake_case ( self : Dict )-> str:
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def snake_case ( self : List[Any] )-> List[str]:
        pass
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def snake_case ( self : Tuple )-> str:
        pass
    # (apparent) test_hidden_states_output: checks stage count and feature-map
    # sizes of the returned hidden states, with and without the config flag.
    def snake_case ( self : Optional[int] )-> List[str]:
        def check_hidden_states_output(lowerCamelCase : Dict, lowerCamelCase : int, lowerCamelCase : List[str] ):
            lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
            model.to(lowerCamelCase )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : Optional[Any] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
            lowerCamelCase__ : Optional[Any] =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowerCamelCase__ : List[str] =self.model_tester.num_stages
            self.assertEqual(len(lowerCamelCase ), expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        lowerCamelCase__ , lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Optional[int] =True
            check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__ : Optional[Any] =True
            check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
    # (apparent) test_initialization: zero-init configs, then every trainable
    # parameter mean must round to 0.0 or 1.0.
    def snake_case ( self : Any )-> List[Any]:
        lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : str =_config_zero_init(lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] =_config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Optional[int] =model_class(config=lowerCamelCase )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
    @unittest.skip(reason='''UperNet does not have tied weights''' )
    def snake_case ( self : Any )-> str:
        pass
    # (apparent) test_model_from_pretrained — slow: downloads a checkpoint.
    @slow
    def snake_case ( self : int )-> Union[str, Any]:
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : str =UperNetForSemanticSegmentation.from_pretrained(lowerCamelCase )
            self.assertIsNotNone(lowerCamelCase )
def prepare_img( ):
    """Download the ADE20k fixture image and return it as an RGB PIL image.

    NOTE(review): reconstructed — the obfuscated original bound the download
    path and the image to throwaway locals and then referenced the unbound
    names ``__lowerCamelCase`` and ``image``.  The function name is restored
    to match the ``prepare_img()`` call sites in the integration tests below.
    """
    filepath = hf_hub_download(
        repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
    image = Image.open(filepath ).convert('''RGB''' )
    return image
@require_torch
@require_vision
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow UperNet integration checks (Swin and ConvNext backbones) against
    pinned logits.

    NOTE(review): reconstructed from an obfuscated block — both methods were
    named ``snake_case`` (the second shadowed the first), locals were bound to
    a throwaway name, and device moves used an unbound ``lowerCamelCase``
    (restored to the imported ``torch_device``).  Checkpoint names, tensor
    literals and tolerances are unchanged.
    """

    def test_inference_swin_backbone( self : str )-> Union[str, Any]:
        processor = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
        model = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(torch_device )
        image = prepare_img()
        inputs = processor(images=image, return_tensors='''pt''' ).to(torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = torch.tensor(
            [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4 ) )

    def test_inference_convnext_backbone( self : Optional[int] )-> Optional[Any]:
        processor = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
        model = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(torch_device )
        image = prepare_img()
        inputs = processor(images=image, return_tensors='''pt''' ).to(torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4 ) )
| 625 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[Any], lowerCamelCase : list[str] )-> List[Any]:
lowerCamelCase__ : list[dict] =[]
self.adlist.append(
{'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} )
for keyword in keywords:
self.add_keyword(lowerCamelCase )
self.set_fail_transitions()
def snake_case ( self : Any, lowerCamelCase : int, lowerCamelCase : str )-> int | None:
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def snake_case ( self : Tuple, lowerCamelCase : str )-> None:
lowerCamelCase__ : str =0
for character in keyword:
lowerCamelCase__ : str =self.find_next_state(lowerCamelCase, lowerCamelCase )
if next_state is None:
self.adlist.append(
{
'''value''': character,
'''next_states''': [],
'''fail_state''': 0,
'''output''': [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
lowerCamelCase__ : Tuple =len(self.adlist ) - 1
else:
lowerCamelCase__ : Union[str, Any] =next_state
self.adlist[current_state]["output"].append(lowerCamelCase )
def snake_case ( self : Dict )-> None:
lowerCamelCase__ : deque =deque()
for node in self.adlist[0]["next_states"]:
q.append(lowerCamelCase )
lowerCamelCase__ : Dict =0
while q:
lowerCamelCase__ : Tuple =q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(lowerCamelCase )
lowerCamelCase__ : Tuple =self.adlist[r]['''fail_state''']
while (
self.find_next_state(lowerCamelCase, self.adlist[child]['''value'''] ) is None
and state != 0
):
lowerCamelCase__ : List[str] =self.adlist[state]['''fail_state''']
lowerCamelCase__ : Optional[Any] =self.find_next_state(
lowerCamelCase, self.adlist[child]['''value'''] )
if self.adlist[child]["fail_state"] is None:
lowerCamelCase__ : Dict =0
lowerCamelCase__ : Optional[int] =(
self.adlist[child]['''output''']
+ self.adlist[self.adlist[child]['''fail_state''']]['''output''']
)
def snake_case ( self : Any, lowerCamelCase : str )-> dict[str, list[int]]:
lowerCamelCase__ : dict ={} # returns a dict with keywords and list of its occurrences
lowerCamelCase__ : int =0
for i in range(len(lowerCamelCase ) ):
while (
self.find_next_state(lowerCamelCase, string[i] ) is None
and current_state != 0
):
lowerCamelCase__ : Optional[Any] =self.adlist[current_state]['''fail_state''']
lowerCamelCase__ : int =self.find_next_state(lowerCamelCase, string[i] )
if next_state is None:
lowerCamelCase__ : List[str] =0
else:
lowerCamelCase__ : int =next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
lowerCamelCase__ : Optional[Any] =[]
result[key].append(i - len(lowerCamelCase ) + 1 )
return result
if __name__ == "__main__":
    # Run any doctests defined in this module when executed as a script.
    import doctest

    doctest.testmod()
| 625 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase_ ):
    """Import-guard placeholder: any use raises a helpful backend error unless
    the ``onnx`` backend is installed.

    NOTE(review): the original defined both classmethods under the same name
    (``snake_case``), so the first was shadowed and unreachable; the
    conventional transformers dummy-object names ``from_config`` /
    ``from_pretrained`` are restored -- confirm against the real class this
    dummy stands in for. (Upstream also names the backend list ``_backends``;
    ``_a`` is kept here to avoid changing the attribute interface.)
    """

    # Backends that must be available before the real object can be used.
    _a = ['onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['onnx'])
| 625 | 1 |
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    """Unit tests for ``Dataset.from_list``.

    NOTE(review): every method in the original block was named ``snake_case``
    (each shadowing the previous, none discovered by unittest), yet the bodies
    called ``self._create_example_records()`` which was never defined, and the
    locals were obfuscated. Names restored to match the call sites and the
    upstream ``datasets`` test module -- confirm against it.
    """

    def _create_example_records(self):
        # Four row-oriented records sharing the same two columns.
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        # Column-oriented equivalent of the records above (helper, unused here).
        data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ['col_1', 'col_2'])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        # Same data built column-wise must produce identical dataset info.
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        example_records = [{'col_1': 1}, {'col_2': 'x'}]
        dset = Dataset.from_list(example_records)
        self.assertDictEqual(dset[0], {'col_1': 1})
        self.assertDictEqual(dset[1], {'col_1': None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        example_records = [{'col_1': []}, {'col_1': [1, 2]}]
        dset = Dataset.from_list(example_records)
        self.assertEqual(dset.info.features['col_1'], Sequence(Value('int64')))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
| 625 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the normalized escape step of the point ``x + y*i`` under the
    Mandelbrot iteration ``z -> z**2 + c``.

    The result lies in [0, 1]: 1.0 means the orbit never diverged within
    ``max_step`` iterations (the point is treated as inside the set); smaller
    values mean earlier divergence.

    NOTE(review): the original signature declared three parameters all named
    ``__lowerCamelCase`` (a SyntaxError) while the body read ``x``, ``y`` and
    ``max_step``; the parameter names are restored from the body. The name
    ``get_distance`` matches the call site in ``get_image``.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007 -- ``step`` is read after the loop
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    """Map an escape ``distance`` to black (inside the set, distance == 1)
    or white (everything else).

    NOTE(review): renamed from the shadowed ``snake_case__`` definition to the
    name used at the call site in ``get_image``.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    """Map an escape ``distance`` to an RGB color.

    Points inside the set (distance == 1) are painted black; for all other
    points the distance is used as the hue at full saturation and value.

    NOTE(review): renamed from the shadowed ``snake_case__`` definition to the
    name used at the call site in ``get_image``.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
):
    """Render the Mandelbrot set into a new PIL ``Image`` and return it.

    The figure is centered at (``figure_center_x``, ``figure_center_y``) and
    spans ``figure_width`` horizontally; the vertical extent follows from the
    image aspect ratio. ``use_distance_color_coding`` selects the colored
    rendering over black-and-white.

    NOTE(review): the original declared seven duplicate ``__lowerCamelCase``
    parameters (a SyntaxError) and bound every computed color to a throwaway
    local without ever writing a pixel; parameter names are restored from the
    body and the pixel writes from the standard algorithm -- confirm output
    visually.
    """
    img = Image.new('RGB', (image_width, image_height))
    pixels = img.load()
    # Loop-invariant: derive the vertical extent from the aspect ratio once.
    figure_height = figure_width / image_width * image_height
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    # NOTE(review): the result is bound to ``_lowercase`` here but displayed
    # as ``img`` below -- one of the two names is wrong (presumably both were
    # ``img`` before obfuscation); as written the ``img.show()`` call raises
    # NameError.
    _lowercase : Optional[Any] = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
| 625 | 1 |
"""Pinned dependency table: package name -> pip requirement specifier.

NOTE(review): this mirrors transformers' auto-generated
``dependency_versions_table.py`` (presumably generated from setup.py --
confirm); edit the generator, not this table, in the upstream project.
"""
# Mapping consumed as a lookup of version pins; keys are bare package names.
_lowercase : Optional[Any] = {
    "Pillow": "Pillow<10.0.0",
    "accelerate": "accelerate>=0.20.3",
    "av": "av==9.2.0",
    "beautifulsoup4": "beautifulsoup4",
    "black": "black~=23.1",
    "codecarbon": "codecarbon==1.2.0",
    "cookiecutter": "cookiecutter==1.7.3",
    "dataclasses": "dataclasses",
    "datasets": "datasets!=2.5.0",
    "decord": "decord==0.6.0",
    "deepspeed": "deepspeed>=0.9.3",
    "diffusers": "diffusers",
    "dill": "dill<0.3.5",
    "evaluate": "evaluate>=0.2.0",
    "fairscale": "fairscale>0.3",
    "faiss-cpu": "faiss-cpu",
    "fastapi": "fastapi",
    "filelock": "filelock",
    "flax": "flax>=0.4.1,<=0.7.0",
    "ftfy": "ftfy",
    "fugashi": "fugashi>=1.0",
    "GitPython": "GitPython<3.1.19",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
    "importlib_metadata": "importlib_metadata",
    "ipadic": "ipadic>=1.0.0,<2.0",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
    "jaxlib": "jaxlib>=0.1.65,<=0.4.13",
    "jieba": "jieba",
    "kenlm": "kenlm",
    "keras-nlp": "keras-nlp>=0.3.1",
    "librosa": "librosa",
    "nltk": "nltk",
    "natten": "natten>=0.14.6",
    "numpy": "numpy>=1.17",
    "onnxconverter-common": "onnxconverter-common",
    "onnxruntime-tools": "onnxruntime-tools>=1.4.2",
    "onnxruntime": "onnxruntime>=1.4.0",
    "opencv-python": "opencv-python",
    "optuna": "optuna",
    "optax": "optax>=0.0.8,<=0.1.4",
    "packaging": "packaging>=20.0",
    "parameterized": "parameterized",
    "phonemizer": "phonemizer",
    "protobuf": "protobuf",
    "psutil": "psutil",
    "pyyaml": "pyyaml>=5.1",
    "pydantic": "pydantic<2",
    "pytest": "pytest>=7.2.0",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "python": "python>=3.8.0",
    "ray[tune]": "ray[tune]",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "rhoknp": "rhoknp>=1.1.0,<1.3.1",
    "rjieba": "rjieba",
    "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
    "ruff": "ruff>=0.0.241,<=0.0.259",
    "sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
    "sacremoses": "sacremoses",
    "safetensors": "safetensors>=0.3.1",
    "sagemaker": "sagemaker>=2.31.0",
    "scikit-learn": "scikit-learn",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "sigopt": "sigopt",
    "starlette": "starlette",
    "sudachipy": "sudachipy>=0.6.6",
    "sudachidict_core": "sudachidict_core>=20220729",
    "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
    "tensorflow": "tensorflow>=2.6,<2.14",
    "tensorflow-text": "tensorflow-text<2.14",
    "tf2onnx": "tf2onnx",
    "timeout-decorator": "timeout-decorator",
    "timm": "timm",
    "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
    "torch": "torch>=1.9,!=1.12.0",
    "torchaudio": "torchaudio",
    "torchvision": "torchvision",
    "pyctcdecode": "pyctcdecode>=0.4.0",
    "tqdm": "tqdm>=4.27",
    "unidic": "unidic>=1.0.2",
    "unidic_lite": "unidic_lite>=1.0.7",
    "urllib3": "urllib3<2.0.0",
    "uvicorn": "uvicorn",
}
| 625 |
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    """Build the ``VideoMAEConfig`` matching ``model_name``.

    Fine-tuned checkpoints additionally get ``num_labels`` and the
    id2label/label2id maps downloaded from the hub label files.

    NOTE(review): the original def was named ``snake_case__`` while the call
    site uses ``get_videomae_config``, and every config assignment was bound
    to a throwaway local; the attribute targets below are restored from the
    upstream transformers conversion script -- confirm against it.
    """
    config = VideoMAEConfig()
    set_architecture_configs(model_name, config)
    if "finetuned" not in model_name:
        # Pre-training checkpoints do not use mean pooling.
        config.use_mean_pooling = False
    if "finetuned" in model_name:
        repo_id = 'huggingface/label-files'
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = 'kinetics400-id2label.json'
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = 'something-something-v2-id2label.json'
        else:
            raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.')
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config
def set_architecture_configs(model_name, config):
    """Set architecture hyper-parameters on ``config`` from the size tag
    ("small"/"large"/"huge") in ``model_name``; "base" keeps the defaults.

    Raises:
        ValueError: if ``model_name`` contains none of the size tags.

    NOTE(review): the original def was named ``snake_case__`` while the call
    site uses ``set_architecture_configs``, declared duplicate parameter names
    (a SyntaxError), and assigned every value to a throwaway local; the
    attribute targets are restored from the upstream transformers conversion
    script -- confirm against it.
    """
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    """Translate one original-checkpoint parameter name into its transformers
    VideoMAE equivalent by successive substring rewrites.

    NOTE(review): in the original block the parameter was named
    ``__lowerCamelCase`` while the body read ``name`` (a NameError), and every
    ``name.replace(...)`` result was bound to a throwaway local so the input
    came back unchanged; the rebindings to ``name`` are restored here.
    """
    if "encoder." in name:
        name = name.replace('encoder.', '')
    if "cls_token" in name:
        name = name.replace('cls_token', 'videomae.embeddings.cls_token')
    if "decoder_pos_embed" in name:
        name = name.replace('decoder_pos_embed', 'decoder.decoder_pos_embed')
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace('pos_embed', 'videomae.embeddings.position_embeddings')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'videomae.embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'videomae.embeddings.norm')
    if "decoder.blocks" in name:
        name = name.replace('decoder.blocks', 'decoder.decoder_layers')
    if "blocks" in name:
        name = name.replace('blocks', 'videomae.encoder.layer')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    # Non-bias attention params map to the self-attention module; remaining
    # ``attn`` occurrences (biases) map to attention.attention.
    if "attn" in name and "bias" not in name:
        name = name.replace('attn', 'attention.self')
    if "attn" in name:
        name = name.replace('attn', 'attention.attention')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "decoder_embed" in name:
        name = name.replace('decoder_embed', 'decoder.decoder_embed')
    if "decoder_norm" in name:
        name = name.replace('decoder_norm', 'decoder.decoder_norm')
    if "decoder_pred" in name:
        name = name.replace('decoder_pred', 'decoder.decoder_pred')
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace('norm.weight', 'videomae.layernorm.weight')
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace('norm.bias', 'videomae.layernorm.bias')
    if "head" in name and "decoder" not in name:
        name = name.replace('head', 'classifier')
    return name
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : int ):
    """Remap an original VideoMAE checkpoint state dict to transformers
    layout, splitting fused qkv weights into separate q/k/v tensors.

    NOTE(review): this block is obfuscation-damaged and cannot run as written:
    the signature declares two parameters with the same name (a SyntaxError),
    the body reads ``orig_state_dict``/``config``/``val``/``key_split``/``dim``
    which are never bound, and every computed slice is assigned to a throwaway
    local instead of being written back into the dict -- so all popped entries
    would simply be lost. The upstream conversion script writes each slice to
    ``orig_state_dict`` under a renamed key (via ``rename_key``); restore from
    there before use.
    """
    for key in orig_state_dict.copy().keys():
        # Pop each entry; the (intended) renamed entry should be re-inserted below.
        lowerCamelCase__ : Dict =orig_state_dict.pop(__lowerCamelCase )
        if key.startswith('''encoder.''' ):
            lowerCamelCase__ : Optional[int] =key.replace('''encoder.''' , '''''' )
        if "qkv" in key:
            lowerCamelCase__ : Any =key.split('''.''' )
            if key.startswith('''decoder.blocks''' ):
                # Decoder layers: qkv is split with the decoder hidden size.
                lowerCamelCase__ : Tuple =config.decoder_hidden_size
                lowerCamelCase__ : str =int(key_split[2] )
                lowerCamelCase__ : Any ='''decoder.decoder_layers.'''
                if "weight" in key:
                    # q / k / v slices of the fused weight matrix.
                    lowerCamelCase__ : List[Any] =val[:dim, :]
                    lowerCamelCase__ : Any =val[dim : dim * 2, :]
                    lowerCamelCase__ : Dict =val[-dim:, :]
            else:
                # Encoder layers: qkv is split with the encoder hidden size.
                lowerCamelCase__ : Optional[Any] =config.hidden_size
                lowerCamelCase__ : Optional[Any] =int(key_split[1] )
                lowerCamelCase__ : str ='''videomae.encoder.layer.'''
                if "weight" in key:
                    lowerCamelCase__ : int =val[:dim, :]
                    lowerCamelCase__ : Tuple =val[dim : dim * 2, :]
                    lowerCamelCase__ : List[Any] =val[-dim:, :]
        else:
            lowerCamelCase__ : int =val
    return orig_state_dict
def prepare_video():
    """Download the test video (stored as a ``.npy`` array on the hub) and
    return it as a list of frames.

    NOTE(review): renamed from the shadowed ``snake_case__`` definition to the
    name used at the call site in the checkpoint-conversion routine.
    """
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video', filename='eating_spaghetti.npy', repo_type='dataset')
    video = np.load(file)
    return list(video)
def snake_case__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ):
    """Convert one original VideoMAE checkpoint (downloaded from Google
    Drive) to transformers format, verify its logits (and loss, where
    applicable) against hard-coded expected values, then optionally save
    and/or push the result.

    NOTE(review): obfuscation-damaged -- the four parameters all share one
    name (a SyntaxError; upstream they are checkpoint_url,
    pytorch_dump_folder_path, model_name, push_to_hub), and the body reads
    names (``model_name``, ``files``, ``model``, ``outputs`` ...) that the
    junk local assignments never bind. Restore from the upstream conversion
    script before use; the expected-value tables below look intact.
    """
    lowerCamelCase__ : str =get_videomae_config(__lowerCamelCase )
    if "finetuned" in model_name:
        lowerCamelCase__ : Tuple =VideoMAEForVideoClassification(__lowerCamelCase )
    else:
        lowerCamelCase__ : int =VideoMAEForPreTraining(__lowerCamelCase )
    # download original checkpoint, hosted on Google Drive
    lowerCamelCase__ : Union[str, Any] ='''pytorch_model.bin'''
    gdown.cached_download(__lowerCamelCase , __lowerCamelCase , quiet=__lowerCamelCase )
    lowerCamelCase__ : Optional[Any] =torch.load(__lowerCamelCase , map_location='''cpu''' )
    # Some checkpoints nest the weights under "model", others under "module".
    if "model" in files:
        lowerCamelCase__ : Dict =files['''model''']
    else:
        lowerCamelCase__ : str =files['''module''']
    lowerCamelCase__ : Optional[Any] =convert_state_dict(__lowerCamelCase , __lowerCamelCase )
    model.load_state_dict(__lowerCamelCase )
    model.eval()
    # verify model on basic input
    lowerCamelCase__ : Dict =VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
    lowerCamelCase__ : int =prepare_video()
    lowerCamelCase__ : Tuple =image_processor(__lowerCamelCase , return_tensors='''pt''' )
    # Pre-training checkpoints additionally need the boolean mask positions.
    if "finetuned" not in model_name:
        lowerCamelCase__ : Tuple =hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
        lowerCamelCase__ : Union[str, Any] =torch.load(__lowerCamelCase )
    lowerCamelCase__ : int =model(**__lowerCamelCase )
    lowerCamelCase__ : Dict =outputs.logits
    lowerCamelCase__ : List[str] =[
        '''videomae-small-finetuned-kinetics''',
        '''videomae-small-finetuned-ssv2''',
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        '''videomae-base-short''',
        '''videomae-base-short-finetuned-kinetics''',
        '''videomae-base''',
        '''videomae-base-finetuned-kinetics''',
        '''videomae-large''',
        '''videomae-large-finetuned-kinetics''',
        '''videomae-huge-finetuned-kinetics''',
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        '''videomae-base-short-ssv2''',
        '''videomae-base-short-finetuned-ssv2''',
        '''videomae-base-ssv2''',
        '''videomae-base-finetuned-ssv2''',
    ]
    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        lowerCamelCase__ : Union[str, Any] =torch.Size([1, 400] )
        lowerCamelCase__ : str =torch.tensor([-0.92_91, -0.40_61, -0.93_07] )
    elif model_name == "videomae-small-finetuned-ssv2":
        lowerCamelCase__ : int =torch.Size([1, 174] )
        lowerCamelCase__ : Dict =torch.tensor([0.26_71, -0.46_89, -0.82_35] )
    elif model_name == "videomae-base":
        lowerCamelCase__ : List[str] =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : Dict =torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] )
    elif model_name == "videomae-base-short":
        lowerCamelCase__ : List[Any] =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : List[str] =torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] )
        # we verified the loss both for normalized and unnormalized targets for this one
        lowerCamelCase__ : str =torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] )
    elif model_name == "videomae-large":
        lowerCamelCase__ : Union[str, Any] =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : List[Any] =torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] )
    elif model_name == "videomae-large-finetuned-kinetics":
        lowerCamelCase__ : Any =torch.Size([1, 400] )
        lowerCamelCase__ : str =torch.tensor([0.07_71, 0.00_11, -0.36_25] )
    elif model_name == "videomae-huge-finetuned-kinetics":
        lowerCamelCase__ : Any =torch.Size([1, 400] )
        lowerCamelCase__ : Optional[int] =torch.tensor([0.24_33, 0.16_32, -0.48_94] )
    elif model_name == "videomae-base-short-finetuned-kinetics":
        lowerCamelCase__ : List[str] =torch.Size([1, 400] )
        lowerCamelCase__ : Dict =torch.tensor([0.65_88, 0.09_90, -0.24_93] )
    elif model_name == "videomae-base-finetuned-kinetics":
        lowerCamelCase__ : str =torch.Size([1, 400] )
        lowerCamelCase__ : Any =torch.tensor([0.36_69, -0.06_88, -0.24_21] )
    elif model_name == "videomae-base-short-ssv2":
        lowerCamelCase__ : Tuple =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : Dict =torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] )
    elif model_name == "videomae-base-short-finetuned-ssv2":
        lowerCamelCase__ : Optional[int] =torch.Size([1, 174] )
        lowerCamelCase__ : Any =torch.tensor([-0.05_37, -0.15_39, -0.32_66] )
    elif model_name == "videomae-base-ssv2":
        lowerCamelCase__ : Dict =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : str =torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] )
    elif model_name == "videomae-base-finetuned-ssv2":
        lowerCamelCase__ : str =torch.Size([1, 174] )
        lowerCamelCase__ : int =torch.tensor([0.19_61, -0.83_37, -0.63_89] )
    else:
        raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1e-4 )
    else:
        print('''Logits:''' , logits[0, :3, :3] )
        assert torch.allclose(logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 )
    print('''Logits ok!''' )
    # verify loss, if applicable
    if model_name == "videomae-base-short":
        lowerCamelCase__ : str =outputs.loss
        assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-4 )
        print('''Loss ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(__lowerCamelCase )
        model.save_pretrained(__lowerCamelCase )
    if push_to_hub:
        print('''Pushing to the hub...''' )
        model.push_to_hub(__lowerCamelCase , organization='''nielsr''' )
if __name__ == "__main__":
    # CLI entry point: parse the checkpoint URL / output path / model name and
    # run the conversion.
    _lowercase : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    # NOTE(review): the parser is bound to ``_lowercase`` above but used as
    # ``parser`` below (and the conversion entry point is the shadowed
    # ``snake_case__`` def) -- obfuscation damage; as written this raises
    # NameError.
    _lowercase : Union[str, Any] = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 625 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()  # make the seeded pipeline tests below reproducible across runs
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    """Fast (tiny-model, CPU) tests for ``StableDiffusionInpaintPipeline``.

    NOTE(review): in the original block the five class attributes were all
    named ``_a`` (each shadowing the previous), the helper/test methods all
    shared the name ``snake_case`` while being called as
    ``get_dummy_components`` / ``get_dummy_inputs``, one signature declared
    duplicate parameter names (a SyntaxError), and ``np.uinta`` does not
    exist. Names restored to what the pipeline tester mixins and the call
    sites expect -- confirm against the upstream diffusers test file.
    """

    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_params = frozenset([])
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build tiny pipeline components; re-seed before each factory so the
        random weights are reproducible."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic prompt/image/mask inputs for ``device``."""
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        # np.uint8 restored here (the block had the mangled ``np.uinta``).
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert('RGB').resize((64, 64))
        if str(device).startswith('mps'):
            # MPS does not support per-device generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': init_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_inference_batch_single_identical(self):
        # Loosen the mixin's default tolerance for this pipeline.
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """GPU integration tests running the real SD-2 inpainting checkpoint.

    NOTE(review): all four methods in the original block shared the name
    ``snake_case`` (only the last survived and none were discovered by
    unittest), and several values were mangled by obfuscation
    (``torch.floataa`` for ``torch.float16``; the undefined ``lowerCamelCase``
    standing in for ``torch_device`` and ``None``). Restored from the
    upstream diffusers test file -- confirm against it.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench.npy')
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9E-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench_fp16.npy')
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        # fp16 output is only loosely compared against the fp32 reference.
        assert np.abs(expected_image - image).max() < 5E-1

    def test_stable_diffusion_inpaint_pipeline_pndm(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder='scheduler')
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type='np', )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 625 |
"""simple docstring"""
_lowercase : str = 0 # The first color of the flag.
_lowercase : Dict = 1 # The second color of the flag.
_lowercase : Tuple = 2 # The third color of the flag.
_lowercase : Optional[int] = (red, white, blue)
def snake_case__ ( __lowerCamelCase : list ):
"""simple docstring"""
if not sequence:
return []
if len(__lowerCamelCase ) == 1:
return list(__lowerCamelCase )
lowerCamelCase__ : List[Any] =0
lowerCamelCase__ : Dict =len(__lowerCamelCase ) - 1
lowerCamelCase__ : Tuple =0
while mid <= high:
if sequence[mid] == colors[0]:
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =sequence[high], sequence[mid]
high -= 1
else:
lowerCamelCase__ : Dict =f'''The elements inside the sequence must contains only {colors} values'''
raise ValueError(__lowerCamelCase )
return sequence
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Read a comma-separated list of 0/1/2 values from stdin and print the
    # sorted result.  Fixes: the obfuscated original referenced the undefined
    # names ``user_input``, ``unsorted`` and ``dutch_national_flag_sort``
    # (the sorter above is named ``snake_case__`` in this module).
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f'{snake_case__(unsorted)}')
| 625 | 1 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def snake_case__ ( __lowerCamelCase ):
    """Gaussian Error Linear Unit, exact erf formulation:
    0.5 * x * (1 + erf(x / sqrt(2))).

    Fixes: the obfuscated original assigned to a dead local while reading the
    undefined names ``x`` and ``cdf``; the unimported ``List[str]`` parameter
    annotation (NameError at def time) was dropped.
    """
    x = tf.convert_to_tensor(__lowerCamelCase )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def snake_case__ ( __lowerCamelCase ):
    """Tanh-approximated GELU from the original GPT/BERT repos:
    0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).

    Fixes: the obfuscated original read the undefined names ``x``/``pi``/
    ``coeff``/``cdf``; the unimported ``Dict`` annotation was dropped.
    """
    x = tf.convert_to_tensor(__lowerCamelCase )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.04_47_15 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def snake_case__ ( __lowerCamelCase ):
    """Mish activation: x * tanh(softplus(x)).

    Fixes: the obfuscated original converted the input into a dead local and
    then read the undefined name ``x``.
    """
    x = tf.convert_to_tensor(__lowerCamelCase )
    return x * tf.tanh(tf.math.softplus(x ) )
def snake_case__ ( __lowerCamelCase ):
    """Fast tanh approximation of GELU:
    0.5 * x * (1 + tanh(0.7978845608 * x * (1 + 0.044715 * x^2))).

    Fixes: the obfuscated original read undefined ``x`` and used a single name
    ``coeffa`` for both constants; 0.044715 is the cubic coefficient and
    0.7978845608 (= sqrt(2/pi)) the outer factor, as in the upstream
    ``gelu_fast``.
    """
    x = tf.convert_to_tensor(__lowerCamelCase )
    coeffa = tf.cast(0.04_47_15 , x.dtype )
    coeffb = tf.cast(0.79_78_84_56_08 , x.dtype )  # sqrt(2/pi)
    return 0.5 * x * (1.0 + tf.tanh(x * coeffb * (1.0 + coeffa * x * x) ))
def snake_case__ ( __lowerCamelCase ):
    """Quick GELU: x * sigmoid(1.702 * x), a cheap sigmoid approximation.

    Fixes: the obfuscated original read the undefined names ``x`` and
    ``coeff``.
    """
    x = tf.convert_to_tensor(__lowerCamelCase )
    coeff = tf.cast(1.7_02 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def snake_case__ ( __lowerCamelCase : str ):
    """GELU clipped to the range [-10, 10] (used by some quantized models).

    NOTE(review): ``_gelu`` is not defined anywhere in this module as written —
    it presumably refers to the erf-based GELU defined above (itself named
    ``snake_case__`` here, and shadowed by later defs); confirm and rebind
    before relying on this function.
    """
    return tf.clip_by_value(_gelu(__lowerCamelCase ) , -10 , 10 )
def snake_case__ ( x , axis=-1 ):
    """Gated Linear Unit: split *x* into two halves along *axis* and gate the
    first half with the sigmoid of the second: GLU(a ‖ b) = a * sigmoid(b).

    Fixes: the obfuscated original declared two parameters with the same name
    (a SyntaxError), unpacked the split into a single dead local, and applied
    the sigmoid to the whole input instead of the gate half.  Parameter names
    restored from the upstream ``glu(x, axis=-1)``.
    """
    a , b = tf.split(x , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap( __lowerCamelCase ):
        """Keras GELU with the tanh approximation enabled."""
        return tf.keras.activations.gelu(__lowerCamelCase , approximate=True )

    # TF >= 2.4 ships a native GELU: use the exact form directly and the
    # wrapper above for the tanh approximation.
    # Fixes: the obfuscated original passed the *input* as the ``approximate``
    # flag and assigned both aliases to the same throwaway name while the very
    # next line referenced the undefined ``approximate_gelu_wrap``.
    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    # NOTE(review): ``_gelu`` / ``_gelu_new`` are not bound under these names
    # in this module as written — they correspond to the erf-based and
    # tanh-approximated GELU helpers defined above; confirm and rebind.
    gelu = _gelu
    gelu_new = _gelu_new
# Name -> activation-callable lookup table used by the getter below.
# NOTE(review): as written, most referenced values (gelu, gelu_aa, gelu_fast,
# gelu_new, glu, mish, quick_gelu) are not bound under these identifiers in
# this module — they correspond to the helper functions defined above, which
# the obfuscation left named ``snake_case__``; they must be re-exported under
# these names for the table to resolve.  The ``Optional[int]`` annotation also
# references an unimported typing name and is evaluated at module level.
_lowercase : Optional[int] = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def snake_case__ ( __lowerCamelCase ):
    """Return the TF activation callable registered under the given name.

    Raises:
        KeyError: if the name is not present in the ``ACTaFN`` table.

    Fixes: the obfuscated original tested and indexed with the undefined name
    ``activation_string`` instead of the parameter; the unimported
    ``Optional[Any]`` annotation (NameError at def time) was dropped.
    """
    if __lowerCamelCase in ACTaFN:
        return ACTaFN[__lowerCamelCase]
    else:
        raise KeyError(f'''function {__lowerCamelCase} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
| 625 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    """Fast (CPU-sized) tests for the StableUnCLIP image-to-image pipeline.

    Fixes: the obfuscated original bound every component/local to the same
    dead name while constructors and the returned dict read the real names
    (NameError).  Names below are restored from the in-file usages (the
    components dict, ``self.get_dummy_components()``/``get_dummy_inputs()``)
    and the PipelineTesterMixin attribute contract.
    """

    pipeline_class = StableUnCLIPImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )

    def get_dummy_components(self ):
        """Build a tiny, seeded set of pipeline components for CPU tests."""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32 )
        torch.manual_seed(0 )
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) )
        # image noising components
        torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
        image_noising_scheduler = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
        # regular denoising components
        torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
        torch.manual_seed(0 )
        # NOTE(review): the boolean UNet flags were obfuscated; upstream uses
        # upcast_attention=True, use_linear_projection=True — confirm.
        unet = UNetaDConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D'''), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='''projection''', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule='''scaled_linear''', beta_start=0.00_085, beta_end=0.012, prediction_type='''v_prediction''', set_alpha_to_one=False, steps_offset=1, )
        torch.manual_seed(0 )
        vae = AutoencoderKL()
        components = {
            # image encoding components
            '''feature_extractor''': feature_extractor,
            '''image_encoder''': image_encoder.eval(),
            # image noising components
            '''image_normalizer''': image_normalizer.eval(),
            '''image_noising_scheduler''': image_noising_scheduler,
            # regular denoising components
            '''tokenizer''': tokenizer,
            '''text_encoder''': text_encoder.eval(),
            '''unet''': unet.eval(),
            '''scheduler''': scheduler,
            '''vae''': vae.eval(),
        }
        return components

    def get_dummy_inputs(self, device, seed=0, pil_image=True ):
        """Deterministic call kwargs; optionally converts the tensor to PIL."""
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed ) ).to(device )
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1 )
            input_image = input_image.cpu().permute(0, 2, 3, 1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def test_image_embeds_none(self ):
        """Pipeline must still produce the reference slice with image_embeds=None."""
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs.update({'''image_embeds''': None} )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def test_attention_slicing_forward_pass(self ):
        # Exact comparison is only stable on cpu/mps.
        test_max_difference = torch_device in ['''cpu''', '''mps''']
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )

    def test_inference_batch_single_identical(self ):
        test_max_difference = torch_device in ['''cpu''', '''mps''']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
    def test_xformers_attention_forwardGenerator_pass(self ):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow GPU integration tests for StableUnCLIP image-to-image.

    Fixes: the obfuscated original bound every local to one dead name while
    later lines read ``pipe``/``image``/``expected_image`` (NameError), named
    the tear-down method so that unittest would never call it (restored to
    ``tearDown``, grounded by the ``super().tearDown()`` call), and used the
    non-existent dtype ``torch.floataa`` (restored to ``torch.float16``).
    """

    def tearDown(self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-l-img2img''', torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        output = pipe(init_image, '''anime turle''', generator=generator, output_type='''np''' )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image )

    def test_stable_unclip_h_img2img(self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        output = pipe(init_image, '''anime turle''', generator=generator, output_type='''np''' )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image )

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            init_image, '''anime turtle''', num_inference_steps=2, output_type='''np''', )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 625 | 1 |
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def snake_case__ ( __lowerCamelCase : ndarray ):
    """Squared magnitude of a vector, i.e. its dot product with itself."""
    vector = __lowerCamelCase
    return np.dot(vector , vector )
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str], *,
lowerCamelCase : float = np.inf, lowerCamelCase : str = "linear", lowerCamelCase : float = 0.0, )-> None:
lowerCamelCase__ : List[Any] =regularization
lowerCamelCase__ : List[Any] =gamma
if kernel == "linear":
lowerCamelCase__ : Union[str, Any] =self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('''rbf kernel requires gamma''' )
if not isinstance(self.gamma, (float, int) ):
raise ValueError('''gamma must be float or int''' )
if not self.gamma > 0:
raise ValueError('''gamma must be > 0''' )
lowerCamelCase__ : Any =self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
lowerCamelCase__ : Dict =F'''Unknown kernel: {kernel}'''
raise ValueError(lowerCamelCase )
def snake_case ( self : Any, lowerCamelCase : ndarray, lowerCamelCase : ndarray )-> float:
return np.dot(lowerCamelCase, lowerCamelCase )
def snake_case ( self : Optional[Any], lowerCamelCase : ndarray, lowerCamelCase : ndarray )-> float:
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def snake_case ( self : Any, lowerCamelCase : list[ndarray], lowerCamelCase : ndarray )-> None:
lowerCamelCase__ : int =observations
lowerCamelCase__ : Dict =classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((lowerCamelCase__) , ) : Optional[Any] =np.shape(lowerCamelCase )
def to_minimize(lowerCamelCase : ndarray ) -> float:
lowerCamelCase__ : Union[str, Any] =0
((lowerCamelCase__) , ) : Union[str, Any] =np.shape(lowerCamelCase )
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i], observations[j] )
)
return 1 / 2 * s - sum(lowerCamelCase )
lowerCamelCase__ : List[str] =LinearConstraint(lowerCamelCase, 0, 0 )
lowerCamelCase__ : Optional[Any] =Bounds(0, self.regularization )
lowerCamelCase__ : str =minimize(
lowerCamelCase, np.ones(lowerCamelCase ), bounds=lowerCamelCase, constraints=[ly_contraint] ).x
lowerCamelCase__ : int =l_star
# calculating mean offset of separation plane to points
lowerCamelCase__ : Any =0
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i], observations[j] )
lowerCamelCase__ : List[Any] =s / n
def snake_case ( self : Dict, lowerCamelCase : ndarray )-> int:
lowerCamelCase__ : int =sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n], lowerCamelCase )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 625 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : int = 4000000 ):
    """Project Euler problem 2: sum of the even-valued Fibonacci numbers that
    do not exceed the given bound (default 4,000,000).

    Fixes: the obfuscated original appended to the undefined ``even_fibs``,
    looped on the undefined ``b``/``n``, and summed the *bound* instead of the
    collected terms.
    """
    even_fibs = []
    a , b = 0, 1
    while b <= __lowerCamelCase:
        if b % 2 == 0:
            even_fibs.append(b )
        a , b = b, a + b
    return sum(even_fibs )


if __name__ == "__main__":
    # Fixes: the original printed the undefined name ``solution``.
    print(f'{snake_case__() = }')
| 625 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def snake_case__ ( __lowerCamelCase : str ):
    """Fetch the authenticated GitHub user's profile as a dict, using the
    given personal-access token.

    Fixes: the obfuscated original referenced the undefined names
    ``BASE_URL``/``auth_token``/``USER_TOKEN``/``fetch_github_info``
    (NameError, plus unimported ``Optional``/``List`` annotations evaluated at
    module level) and issued the GET against the *token* rather than the
    authenticated-user endpoint.
    """
    headers = {
        "Authorization": f'''token {__lowerCamelCase}''',
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT , headers=headers ).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in snake_case__(USER_TOKEN).items():
            print(f'{key}: {value}')
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
| 625 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class __SCREAMING_SNAKE_CASE :
    """Builds tiny BlenderbotSmall configs and inputs for the tests below.

    Fixes: the obfuscated ``__init__`` declared sixteen parameters all named
    ``lowerCamelCase`` (a SyntaxError); names are restored in order from the
    attribute assignments in the body.  Method names are restored from the
    in-file callers (``prepare_config_and_inputs_for_common`` /
    ``check_decoder_model_past_large_inputs``).
    """

    config_cls = BlenderbotSmallConfig
    config_updates = {}
    # NOTE(review): attribute name restored by upstream convention — confirm.
    hidden_act = 'gelu'

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self ):
        """Return a tiny config plus matching encoder/decoder input dict."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
        input_ids = tf.concat([input_ids, eos_tensor], axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids )
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict ):
        """Cached (past_key_values) decoding must match uncached decoding."""
        model = TFBlenderbotSmallModel(config=config ).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        head_mask = inputs_dict['''head_mask''']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1 )
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1E-3 )
def prepare_blenderbot_small_inputs_dict(
    config ,
    input_ids ,
    decoder_input_ids ,
    attention_mask=None ,
    decoder_attention_mask=None ,
    head_mask=None ,
    decoder_head_mask=None ,
    cross_attn_head_mask=None ,
):
    """Build the standard encoder/decoder input dict, deriving any mask that
    was not supplied from the token ids and config.

    Fixes: the obfuscated original declared all eight parameters with the same
    name (a SyntaxError) and used the non-existent ``tf.inta`` dtype; the def
    is renamed to the name its in-file caller already uses.
    """
    if attention_mask is None:
        # Attend to every non-pad token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        # The first decoder position (decoder_start_token) is always attended.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    """Common-model and pipeline tests for TFBlenderbotSmall.

    Fixes: the obfuscated original named all test methods ``snake_case`` so
    unittest would never collect them, bound locals to dead names, and passed
    an undefined ``lowerCamelCase`` as ``config_class``.  Attribute names are
    restored per the TFModelTesterMixin contract (conf: review).
    """

    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFBlenderbotSmallForConditionalGeneration,
            'feature-extraction': TFBlenderbotSmallModel,
            'summarization': TFBlenderbotSmallForConditionalGeneration,
            'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
            'translation': TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self ):
        # NOTE(review): TFBlenderbotSmallModelTester is not defined under that
        # name in this file as written (the tester class above is obfuscated);
        # the reference is kept unchanged from the original.
        self.model_tester = TFBlenderbotSmallModelTester(self )
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig )

    def test_config(self ):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_tokenizers
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow generation smoke test against the released 90M checkpoint.

    Fixes: the obfuscated original stored both class attributes as ``_a``
    while methods read ``self.src_text`` / ``self.model_name``, named the
    cached properties so ``self.tokenizer`` / ``self.model`` could not
    resolve, gave the test method a name unittest would never collect, and
    left ``model``/``generated_words`` unbound (NameError).
    """

    src_text = [
        'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
        ' i\'m going to throw up.\nand why is that?'
    ]
    model_name = 'facebook/blenderbot_small-90M'

    @cached_property
    def tokenizer(self ):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )

    @cached_property
    def model(self ):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    @slow
    def test_90_generation_from_short_input(self ):
        model_inputs = self.tokenizer(self.src_text, return_tensors='''tf''' )
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True )[0]
        # Generation may legitimately drift slightly between versions; accept
        # any of the known-good outputs.
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 625 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    """Fast (CPU-sized) tests for the DiT class-conditional pipeline.

    Fixes: the obfuscated original bound every local/component to one dead
    name while later lines read the real names (NameError) and passed the
    undefined ``lowerCamelCase`` as the transformer's boolean flags (restored
    to the upstream attention_bias=True, norm_elementwise_affine=False).
    """

    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        'latents',
        'num_images_per_prompt',
        'callback',
        'callback_steps',
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE(review): the original attribute name for this flag was lost in
    # obfuscation; kept under its obfuscated name to avoid inventing one.
    _a = False

    def get_dummy_components(self ):
        torch.manual_seed(0 )
        transformer = TransformeraDModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn='''gelu-approximate''', num_embeds_ada_norm=1000, norm_type='''ada_norm_zero''', norm_elementwise_affine=False, )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''class_labels''': [1],
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    def test_inference(self ):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 16, 16, 3) )
        expected_slice = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff, 1E-3 )

    def test_inference_batch_single_identical(self ):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1E-3 )

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
    def test_xformers_attention_forwardGenerator_pass(self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow GPU integration tests for DiT against reference renders.

    Fixes: the obfuscated original bound every local to one dead name while
    later lines read ``pipe``/``words``/``images`` etc. (NameError), named the
    tear-down method so unittest would never call it (restored to
    ``tearDown``, grounded by ``super().tearDown()``), and discarded the newly
    built DPMSolver scheduler instead of installing it on the pipeline.
    """

    def tearDown(self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self ):
        generator = torch.manual_seed(0 )
        pipe = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
        pipe.to('''cuda''' )
        words = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
        class_ids = pipe.get_label_ids(words )
        images = pipe(class_ids, generator=generator, num_inference_steps=40, output_type='''np''' ).images
        for word, image in zip(words, images ):
            expected_image = load_numpy(
                f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
            assert np.abs((expected_image - image).max() ) < 1E-2

    def test_dit_512(self ):
        pipe = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
        # Swap in the multistep DPM solver (the built scheduler was previously
        # thrown away, leaving the default scheduler in use).
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.to('''cuda''' )
        words = ['''vase''', '''umbrella''']
        class_ids = pipe.get_label_ids(words )
        generator = torch.manual_seed(0 )
        images = pipe(class_ids, generator=generator, num_inference_steps=25, output_type='''np''' ).images
        for word, image in zip(words, images ):
            expected_image = load_numpy(
                '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
                f'''/dit/{word}_512.npy''' )
            assert np.abs((expected_image - image).max() ) < 1E-1
| 625 |
"""simple docstring"""
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """Return True if vertex ``next_ver`` can extend the partial cycle ``path``
    at position ``curr_ind``.

    Restored from the scrambled source, where all four parameters shared one
    name (a SyntaxError) while the body already used the real names.
    """
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Backtracking helper: try to fill ``path`` from index ``curr_ind`` on.

    Mutates ``path`` in place and returns True when a Hamiltonian cycle has
    been completed (i.e. the last placed vertex connects back to the start).
    """
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Return a Hamiltonian cycle through ``graph`` starting and ending at
    ``start_index``, or an empty list if none exists.

    ``graph`` is an adjacency matrix of 0/1 entries.
    """
    # Path holds every vertex plus the repeated starting vertex at the end.
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
| 625 | 1 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : Any = logging.get_logger(__name__)
# Mapping of key prefixes in the original VisualBERT checkpoints to the names
# used by the HF implementation; applied in order to every state-dict key.
# (The scrambled source bound this list and the checkpoint list below to one
# reused placeholder name, while the functions reference the real names.)
rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

# File names of the original checkpoints this script knows how to convert.
ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]


def load_state_dict(checkpoint_path):
    """Load a checkpoint's state dict onto CPU."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """Translate an original-format state dict ``d`` into HF VisualBert naming.

    Detector weights are dropped, prefixes are renamed per
    ``rename_keys_prefix``, and the buffers/parameters the HF model expects but
    the old format lacks (``position_ids``, ``decoder.bias``) are synthesized.
    """
    new_d = OrderedDict()
    # The HF model registers position_ids as a buffer; the old format has none.
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Convert an original VisualBERT checkpoint and save it in HF format.

    The task (pretraining/vqa/nlvr/multichoice) and the visual embedding size
    are inferred from the checkpoint file name.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 625 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# Fixture SentencePiece model used by the tokenizer tests in this module.
# NOTE(review): the scrambled source bound all three module constants to one
# reused placeholder name, while the test classes reference EN_CODE/RO_CODE by
# their real names; SAMPLE_VOCAB is the upstream name — confirm against the
# original test module.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

# Fairseq ids of the MBart "en_XX" and "ro_RO" language-code tokens.
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    """Tests for the slow (`MBartTokenizer`) and fast (`MBartTokenizerFast`) tokenizers.

    NOTE(review): this file looks machine-scrambled — the base class name
    ``lowerCAmelCase_`` is undefined (presumably the ``TokenizerTesterMixin``
    imported above), the four ``_a`` class attributes below all reuse one name
    so each assignment overwrites the previous one, and many statements
    reference the unbound name ``lowerCamelCase``. The intended values cannot
    be fully recovered from this file alone, so the code is left untouched and
    only annotated; confirm each flagged spot against the original test module.
    """

    # NOTE(review): presumably tokenizer_class / rust_tokenizer_class /
    # test_rust_tokenizer / test_sentencepiece in the original mixin fields.
    _a = MBartTokenizer
    _a = MBartTokenizerFast
    _a = True
    _a = True

    def snake_case ( self : Tuple )-> Union[str, Any]:
        # setUp: build a tokenizer from the SentencePiece fixture and save it
        # into the mixin's temp dir so the shared tests can reload it.
        super().setUp()

        # We have a SentencePiece fixture for testing
        # NOTE(review): ``lowerCamelCase`` is unbound here, and ``tokenizer``
        # on the next line is never assigned — presumably the fixture path and
        # ``keep_accents=True`` were intended.
        lowerCamelCase__ : Union[str, Any] =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )
        tokenizer.save_pretrained(self.tmpdirname )

    def snake_case ( self : Dict )-> Union[str, Any]:
        # Full-tokenizer round trip: tokenize, convert tokens to ids (offset by
        # the fairseq id shift), and convert ids back, checking that characters
        # missing from the fixture vocab come back as <unk>.
        lowerCamelCase__ : Any =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )

        lowerCamelCase__ : List[Any] =tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(lowerCamelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowerCamelCase ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )

        lowerCamelCase__ : str =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            lowerCamelCase, [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ], )
        lowerCamelCase__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(lowerCamelCase )
        self.assertListEqual(
            lowerCamelCase, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ], )
        lowerCamelCase__ : str =tokenizer.convert_ids_to_tokens(lowerCamelCase )
        self.assertListEqual(
            lowerCamelCase, [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ], )

    def snake_case ( self : Tuple )-> List[Any]:
        # Round-trips the fast tokenizer through save_pretrained/from_pretrained
        # in three modes: default, legacy_format=True, legacy_format=False.
        # NOTE(review): ``tokenizer_r`` / ``tokenizer_p`` / ``tokenizer_pp`` and
        # every ``lowerCamelCase`` argument below are unbound — the scrambled
        # assignments all target ``lowerCamelCase__`` instead.
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        lowerCamelCase__ : int =(self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCamelCase__ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
                lowerCamelCase__ : str =self.tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )

                lowerCamelCase__ : List[str] =tempfile.mkdtemp()

                lowerCamelCase__ : Union[str, Any] =tokenizer_r.save_pretrained(lowerCamelCase )
                lowerCamelCase__ : Optional[int] =tokenizer_p.save_pretrained(lowerCamelCase )

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                lowerCamelCase__ : List[str] =tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )

                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Any =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : Dict =tokenizer_p.from_pretrained(lowerCamelCase )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(lowerCamelCase )

                # Save tokenizer rust, legacy_format=True
                lowerCamelCase__ : Dict =tempfile.mkdtemp()

                lowerCamelCase__ : List[str] =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
                lowerCamelCase__ : Tuple =tokenizer_p.save_pretrained(lowerCamelCase )

                # Checks it save with the same files
                self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )

                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Optional[int] =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : Any =tokenizer_p.from_pretrained(lowerCamelCase )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )

                shutil.rmtree(lowerCamelCase )

                # Save tokenizer rust, legacy_format=False
                lowerCamelCase__ : Optional[int] =tempfile.mkdtemp()

                lowerCamelCase__ : int =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
                lowerCamelCase__ : Dict =tokenizer_p.save_pretrained(lowerCamelCase )

                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )

                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Dict =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : int =tokenizer_p.from_pretrained(lowerCamelCase )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )

                shutil.rmtree(lowerCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """English→Romanian integration tests against the real
    facebook/mbart-large-en-ro tokenizer checkpoint.

    NOTE(review): scrambled source — the five ``_a`` class attributes below
    reuse one name (each assignment overwrites the previous), yet the methods
    read ``cls.checkpoint_name`` / ``self.src_text`` / ``self.tgt_text`` /
    ``self.expected_src_tokens`` / ``self.tokenizer``; several bodies also
    reference the unbound name ``lowerCamelCase``. Code left untouched,
    annotated only; confirm values against the original test module.
    """

    _a = 'facebook/mbart-large-en-ro'
    _a = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    _a = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    # Expected ids for the first source sentence, ending with </s> (2) and the
    # en_XX language code.
    _a = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]

    @classmethod
    def snake_case ( cls : List[Any] )-> Optional[int]:
        # setUpClass: load the real tokenizer once for the whole class.
        # NOTE(review): the two locals below were presumably ``cls.tokenizer``
        # and ``cls.pad_token_id``; as written the values are discarded.
        lowerCamelCase__ : MBartTokenizer =MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='''en_XX''', tgt_lang='''ro_RO''' )
        lowerCamelCase__ : Optional[int] =1
        return cls

    def snake_case ( self : Optional[Any] )-> List[str]:
        # Language-code ids are fixed by the fairseq vocabulary layout.
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''], 25_0001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''], 25_0004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''], 25_0020 )

    def snake_case ( self : Optional[int] )-> List[Any]:
        # Encoding the first source sentence must reproduce expected_src_tokens.
        lowerCamelCase__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, lowerCamelCase )

    def snake_case ( self : Optional[Any] )-> str:
        # Decoding with skip_special_tokens must drop the language code and </s>.
        # NOTE(review): the leading ``lowerCamelCase`` arguments are unbound;
        # presumably RO_CODE and the decoded strings were intended.
        self.assertIn(lowerCamelCase, self.tokenizer.all_special_ids )
        lowerCamelCase__ : Optional[int] =[RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
        lowerCamelCase__ : Any =self.tokenizer.decode(lowerCamelCase, skip_special_tokens=lowerCamelCase )
        lowerCamelCase__ : str =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCamelCase )
        self.assertEqual(lowerCamelCase, lowerCamelCase )
        self.assertNotIn(self.tokenizer.eos_token, lowerCamelCase )

    def snake_case ( self : Tuple )-> int:
        # Truncation must keep max_length tokens and still end with </s> + code.
        lowerCamelCase__ : Optional[int] =['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0], lowerCamelCase )
        lowerCamelCase__ : Dict =10
        lowerCamelCase__ : Optional[int] =self.tokenizer(lowerCamelCase, max_length=lowerCamelCase, truncation=lowerCamelCase ).input_ids[0]
        self.assertEqual(ids[-2], 2 )
        self.assertEqual(ids[-1], lowerCamelCase )
        self.assertEqual(len(lowerCamelCase ), lowerCamelCase )

    def snake_case ( self : int )-> Any:
        # <mask> and ar_AR sit at the top of the extended vocabulary.
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ), [25_0026, 25_0001] )

    def snake_case ( self : Tuple )-> Optional[Any]:
        # Saving and reloading must preserve the fairseq token/id mapping.
        lowerCamelCase__ : int =tempfile.mkdtemp()
        lowerCamelCase__ : Optional[int] =self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =MBartTokenizer.from_pretrained(lowerCamelCase )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowerCamelCase )

    @require_torch
    def snake_case ( self : Optional[Any] )-> Tuple:
        # Batched src/tgt encoding + shift_tokens_right must reproduce the
        # fairseq reference layout (language code first on the decoder side).
        lowerCamelCase__ : Optional[Any] =self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, return_tensors='''pt''' )
        lowerCamelCase__ : Dict =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def snake_case ( self : Optional[Any] )-> Any:
        # Padding/truncating to the expected source length must yield a (2, 14)
        # batch and leave the tokenizer's special-token state reset afterwards.
        lowerCamelCase__ : str =self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=len(self.expected_src_tokens ), return_tensors='''pt''', )
        lowerCamelCase__ : List[Any] =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )

        self.assertIsInstance(lowerCamelCase, lowerCamelCase )

        self.assertEqual((2, 14), batch.input_ids.shape )
        self.assertEqual((2, 14), batch.attention_mask.shape )
        lowerCamelCase__ : Any =batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, lowerCamelCase )
        self.assertEqual(2, batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [] )
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE] )

    def snake_case ( self : List[Any] )-> Dict:
        # Source and target can be padded/truncated to different max lengths.
        lowerCamelCase__ : Any =self.tokenizer(self.src_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=3, return_tensors='''pt''' )
        lowerCamelCase__ : Tuple =self.tokenizer(
            text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=10, return_tensors='''pt''' )
        lowerCamelCase__ : Union[str, Any] =targets['''input_ids''']
        lowerCamelCase__ : List[Any] =shift_tokens_right(lowerCamelCase, self.tokenizer.pad_token_id )

        self.assertEqual(batch.input_ids.shape[1], 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1], 10 )

    @require_torch
    def snake_case ( self : Optional[int] )-> List[Any]:
        # _build_translation_inputs appends </s>+en_XX to the source and forces
        # the ar_AR code as the first generated token.
        lowerCamelCase__ : str =self.tokenizer._build_translation_inputs(
            '''A test''', return_tensors='''pt''', src_lang='''en_XX''', tgt_lang='''ar_AR''' )

        self.assertEqual(
            nested_simplify(lowerCamelCase ), {
                # A, test, EOS, en_XX
                '''input_ids''': [[62, 3034, 2, 25_0004]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 25_0001,
            }, )
| 625 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    """Placeholder surfaced when the `onnx` backend is unavailable.

    Constructing it (or calling its factory classmethods) raises the standard
    "requires the onnx backend" error via ``requires_backends``. Restored from
    the scrambled source: the metaclass was an undefined name (``DummyObject``
    is what the import above supplies), and every signature reused one name for
    both ``*args`` and ``**kwargs``, which is a SyntaxError.
    """

    # Backends this dummy stands in for.
    # NOTE(review): upstream dummy objects call this field ``_backends`` (the
    # DummyObject metaclass reads it) — confirm the intended attribute name.
    _a = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    # NOTE(review): this second classmethod shares the name of the one above and
    # therefore replaces it — upstream dummies expose two distinct factories
    # (``from_config`` / ``from_pretrained``); confirm the intended names.
    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
| 625 |
"""simple docstring"""
def reverse_long_words(sentence: str) -> str:
    """Reverse every word of more than four characters in ``sentence``.

    Restored from the scrambled source: the length test compared the length of
    the whole sentence instead of the current word, and the def name did not
    match the ``reverse_long_words`` call in the ``__main__`` block below.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
| 625 | 1 |
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    """Return the ``n``-th ugly number.

    Ugly numbers are positive integers whose only prime factors are 2, 3 and 5:
    1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, ...  Uses the classic three-pointer
    dynamic-programming construction. For ``n <= 1`` the seed value 1 is
    returned (the loop body never runs).

    Restored from the scrambled source, where the three candidate values and
    the append target had all been collapsed onto the function parameter, and
    the def name did not match the ``ugly_numbers`` call in ``__main__``.
    """
    ugly_nums = [1]

    ia, ib, ic = 0, 0, 0
    next_2 = ugly_nums[ia] * 2
    next_3 = ugly_nums[ib] * 3
    next_5 = ugly_nums[ic] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            ia += 1
            next_2 = ugly_nums[ia] * 2
        if next_num == next_3:
            ib += 1
            next_3 = ugly_nums[ib] * 3
        if next_num == next_5:
            ic += 1
            next_5 = ugly_nums[ic] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(2_0_0) = }")
| 625 |
"""simple docstring"""
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Project Euler 63: count positive integers that are both an ``n``-digit
    number and an ``n``-th power of a base below ``max_base``.

    Restored from the scrambled source, where both parameters shared one name
    (a SyntaxError) and the def name did not match the ``solution`` call in
    the ``__main__`` block below.
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(1_0, 2_2) = }")
| 625 | 1 |
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
_lowercase : Dict = logging.get_logger(__name__)
# Tokenizer resource maps. Restored names: the tokenizer class in this module
# reads VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP and
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, while the scrambled source bound all
# three dicts to a single reused placeholder name (each assignment overwriting
# the previous one).
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2_0_4_8,
}
class __SCREAMING_SNAKE_CASE(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) CodeGen tokenizer.

    Restored from the scrambled source: the base class was an undefined name
    (``PreTrainedTokenizerFast`` is imported above and otherwise unused), the
    standard class attributes were all bound to one reused name, and every
    method signature repeated a single parameter name — a SyntaxError. The
    method names below are the ones the fast-tokenizer machinery dispatches on
    (``_batch_encode_plus``, ``_encode_plus``, ``save_vocabulary``, ``decode``,
    each of which calls its ``super()`` counterpart) and that this class itself
    calls (``self.truncate``).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )

        # Rebuild the backend pre-tokenizer if its stored add_prefix_space flag
        # does not match the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer model files; returns the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        """Decode ids to text, optionally truncating the completion at the
        first occurrence of any regex in ``truncate_before_pattern``."""
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        """Cut ``completion`` before a second print/def statement and before the
        earliest match of any pattern in ``truncate_before_pattern``."""

        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))

        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))

        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
| 625 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    """Return ``x`` unchanged if it is already iterable, else the pair ``(x, x)``.

    Restored from the scrambled source: the attention-shape checks in this
    module call ``to_atuple`` for image/patch sizes, while the def had a
    different name and its body returned the unbound name ``x``.
    """
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def snake_case ( self : Dict, lowerCamelCase : List[str], lowerCamelCase : Any )-> Union[str, Any]:
pass
    def snake_case ( self : List[str] )-> List[str]:
        # Intentionally empty: concrete test classes override this hook.
        # NOTE(review): presumably ``prepare_config_and_inputs`` in the original
        # mixin — the scrambled name also collides with the next method's,
        # so the later definition shadows this one; confirm the intended name.
        pass
    def snake_case ( self : Optional[Any] )-> str:
        # Intentionally empty: concrete test classes override this hook.
        # NOTE(review): presumably ``get_pretrained_model_and_inputs`` in the
        # original mixin; the scrambled name collides with the previous
        # method's — confirm the intended name.
        pass
def snake_case ( self : Union[str, Any], lowerCamelCase : np.ndarray, lowerCamelCase : np.ndarray, lowerCamelCase : float )-> Dict:
lowerCamelCase__ : Union[str, Any] =np.abs((a - b) ).max()
self.assertLessEqual(lowerCamelCase, lowerCamelCase, F'''Difference between torch and flax is {diff} (>= {tol}).''' )
def snake_case ( self : Dict, lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : List[str], lowerCamelCase : Dict, lowerCamelCase : Any=None, **lowerCamelCase : str )-> int:
lowerCamelCase__ : List[str] =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : Dict =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], config.projection_dim) )
    # NOTE(review): this block is machine-obfuscated. Every parameter is named
    # `lowerCamelCase` (duplicate argument names are a SyntaxError) and most locals
    # collapse onto `lowerCamelCase__`, so names such as `vision_model`, `output`,
    # `out_a` are dangling. Comments below describe the apparent intent only.
    def snake_case ( self : Any, lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : str=None, **lowerCamelCase : List[Any] )-> int:
        # Build a FlaxVisionTextDualEncoderModel from separate vision/text towers
        # via `from_vision_text_pretrained`, then check that both projection
        # outputs have width `model.config.projection_dim`.
        lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] ={'''vision_model''': vision_model, '''text_model''': text_model}
        lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
        lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
        self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], model.config.projection_dim) )

    def snake_case ( self : Any, lowerCamelCase : Dict, lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict=None, **lowerCamelCase : int )-> List[str]:
        # Save/load round-trip: the reloaded model's first output must match the
        # pre-save output within 1e-3 (max absolute difference).
        lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Optional[int] ={'''vision_model''': vision_model, '''text_model''': text_model}
        lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
        lowerCamelCase__ : List[Any] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
        lowerCamelCase__ : int =output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(lowerCamelCase )
            lowerCamelCase__ : Dict =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
        # Re-run on the reloaded model and compare with the original output.
        lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
        lowerCamelCase__ : List[str] =after_output[0]
        lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) )
        self.assertLessEqual(lowerCamelCase, 1E-3 )

    def snake_case ( self : Optional[Any], lowerCamelCase : Dict, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : List[Any]=None, **lowerCamelCase : List[Any] )-> Tuple:
        # Run with output_attentions=True and check attention tensor shapes:
        # one tensor per layer; vision attentions are (heads, seq, seq) with
        # seq = num_patches + 1 (CLS token), text attentions use the input length.
        lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Any ={'''vision_model''': vision_model, '''text_model''': text_model}
        lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
        lowerCamelCase__ : List[str] =model(
            input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase, output_attentions=lowerCamelCase )
        lowerCamelCase__ : int =output.vision_model_output.attentions
        self.assertEqual(len(lowerCamelCase ), vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        lowerCamelCase__ : Tuple =to_atuple(vision_model.config.image_size )
        lowerCamelCase__ : Optional[Any] =to_atuple(vision_model.config.patch_size )
        lowerCamelCase__ : Union[str, Any] =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        lowerCamelCase__ : int =num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) )
        lowerCamelCase__ : List[Any] =output.text_model_output.attentions
        self.assertEqual(len(lowerCamelCase ), text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
    def snake_case ( self : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : Any, lowerCamelCase : Union[str, Any] )-> Any:
        # Cross-framework equivalence check (parameters are presumably fx_model,
        # pt_model, inputs_dict — TODO confirm; obfuscation collapsed the names).
        # Runs both frameworks on the same inputs, asserts the first four outputs
        # agree within 4e-2, then round-trips weights PT->Flax and Flax->PT
        # through save/load and re-checks.
        pt_model.to(lowerCamelCase )
        pt_model.eval()
        # prepare inputs
        lowerCamelCase__ : Any =inputs_dict
        lowerCamelCase__ : Any ={k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
        with torch.no_grad():
            lowerCamelCase__ : List[str] =pt_model(**lowerCamelCase ).to_tuple()
        lowerCamelCase__ : Optional[Any] =fx_model(**lowerCamelCase ).to_tuple()
        self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ):
            self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(lowerCamelCase )
            lowerCamelCase__ : Optional[int] =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase )
        lowerCamelCase__ : List[Any] =fx_model_loaded(**lowerCamelCase ).to_tuple()
        self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ):
            self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(lowerCamelCase )
            lowerCamelCase__ : str =VisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase )
        pt_model_loaded.to(lowerCamelCase )
        pt_model_loaded.eval()
        with torch.no_grad():
            lowerCamelCase__ : List[Any] =pt_model_loaded(**lowerCamelCase ).to_tuple()
        self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ):
            self.assert_almost_equals(lowerCamelCase, pt_output_loaded.numpy(), 4E-2 )
    def snake_case ( self : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[Any], lowerCamelCase : str )-> List[Any]:
        # PT -> Flax direction: build paired PyTorch/Flax dual encoders from the
        # same merged config, port the PT state dict into the Flax params, then
        # delegate to the equivalence checker above.
        lowerCamelCase__ : Any =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : List[Any] =VisionTextDualEncoderModel(lowerCamelCase )
        lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase )
        lowerCamelCase__ : str =convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase )
        lowerCamelCase__ : Tuple =fx_state
        self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase )

    def snake_case ( self : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any] )-> Optional[int]:
        # Flax -> PT direction: load the Flax parameters into the PyTorch model
        # and run the same equivalence check.
        lowerCamelCase__ : Dict =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
        lowerCamelCase__ : Tuple =VisionTextDualEncoderModel(lowerCamelCase )
        lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase )
        lowerCamelCase__ : Tuple =load_flax_weights_in_pytorch_model(lowerCamelCase, fx_model.params )
        self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase )
    def snake_case ( self : Optional[int] )-> Union[str, Any]:
        # Each of the small tests below prepares config+inputs and dispatches to
        # the corresponding check_* helper.
        lowerCamelCase__ : Any =self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**lowerCamelCase )

    def snake_case ( self : Tuple )-> int:
        lowerCamelCase__ : int =self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase )

    def snake_case ( self : Tuple )-> Any:
        lowerCamelCase__ : Tuple =self.prepare_config_and_inputs()
        self.check_save_load(**lowerCamelCase )

    def snake_case ( self : str )-> Any:
        lowerCamelCase__ : str =self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**lowerCamelCase )

    @is_pt_flax_cross_test
    def snake_case ( self : Tuple )-> List[Any]:
        # Cross-framework test: split out the two sub-configs and check numeric
        # equivalence in both PT<->Flax directions.
        lowerCamelCase__ : Union[str, Any] =self.prepare_config_and_inputs()
        lowerCamelCase__ : Union[str, Any] =config_inputs_dict.pop('''vision_config''' )
        lowerCamelCase__ : Optional[Any] =config_inputs_dict.pop('''text_config''' )
        lowerCamelCase__ : Tuple =config_inputs_dict
        self.check_equivalence_pt_to_flax(lowerCamelCase, lowerCamelCase, lowerCamelCase )
        self.check_equivalence_flax_to_pt(lowerCamelCase, lowerCamelCase, lowerCamelCase )

    @slow
    def snake_case ( self : Optional[Any] )-> Tuple:
        # Slow test: a real pretrained checkpoint must round-trip through
        # save_pretrained/from_pretrained with outputs stable to 1e-5.
        lowerCamelCase__ , lowerCamelCase__ : Dict =self.get_pretrained_model_and_inputs()
        lowerCamelCase__ : Optional[int] =model_a(**lowerCamelCase )
        lowerCamelCase__ : List[str] =outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(lowerCamelCase )
            lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] =model_a(**lowerCamelCase )
        lowerCamelCase__ : List[Any] =after_outputs[0]
        lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) )
        self.assertLessEqual(lowerCamelCase, 1E-5 )
@require_flax
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    '''simple docstring'''

    # ViT + BERT flavour of the Flax vision-text dual-encoder test suite
    # (obfuscated class name; base mixin presumably supplies the check_* helpers).
    def snake_case ( self : Optional[int] )-> Optional[Any]:
        # Build a tiny ViT/BERT dual encoder from PT checkpoints, plus random
        # batched inputs (pixel_values, input_ids, attention_mask).
        lowerCamelCase__ : str =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-vit''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
        lowerCamelCase__ : Union[str, Any] =13
        lowerCamelCase__ : List[str] =floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        lowerCamelCase__ : List[str] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
        lowerCamelCase__ : Optional[int] =random_attention_mask([batch_size, 4] )
        lowerCamelCase__ : Any ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return model, inputs

    def snake_case ( self : str, lowerCamelCase : str, lowerCamelCase : int )-> int:
        # Instantiate the two towers from their configs.
        lowerCamelCase__ : str =FlaxViTModel(lowerCamelCase )
        lowerCamelCase__ : Any =FlaxBertModel(lowerCamelCase )
        return vision_model, text_model

    def snake_case ( self : int )-> Optional[int]:
        # Collect configs + inputs from the per-model testers and merge them into
        # the kwargs dict consumed by the check_* helpers.
        lowerCamelCase__ : Any =FlaxViTModelTester(self )
        lowerCamelCase__ : Union[str, Any] =FlaxBertModelTester(self )
        lowerCamelCase__ : Any =vit_model_tester.prepare_config_and_inputs()
        lowerCamelCase__ : Optional[Any] =bert_model_tester.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ : Any =vision_config_and_inputs
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    '''simple docstring'''

    # CLIP-vision + BERT flavour of the Flax vision-text dual-encoder tests.
    def snake_case ( self : Optional[int] )-> Optional[int]:
        # Tiny CLIP/BERT dual encoder loaded from PT checkpoints, plus random
        # batched inputs.
        lowerCamelCase__ : Union[str, Any] =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-clip''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
        lowerCamelCase__ : Union[str, Any] =13
        lowerCamelCase__ : Optional[Any] =floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        lowerCamelCase__ : Union[str, Any] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
        lowerCamelCase__ : str =random_attention_mask([batch_size, 4] )
        lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return model, inputs

    def snake_case ( self : List[str], lowerCamelCase : Any, lowerCamelCase : Dict )-> Dict:
        # Instantiate the two towers from their configs.
        lowerCamelCase__ : str =FlaxCLIPVisionModel(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =FlaxBertModel(lowerCamelCase )
        return vision_model, text_model

    def snake_case ( self : Optional[int] )-> Optional[Any]:
        # Collect configs + inputs from the per-model testers.
        lowerCamelCase__ : List[Any] =FlaxCLIPVisionModelTester(self )
        lowerCamelCase__ : List[Any] =FlaxBertModelTester(self )
        lowerCamelCase__ : Any =clip_model_tester.prepare_config_and_inputs()
        lowerCamelCase__ : Optional[int] =bert_model_tester.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ : List[Any] =vision_config_and_inputs
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''simple docstring'''

    # Integration test against the public clip-italian checkpoint.
    @slow
    def snake_case ( self : Tuple )-> Optional[Any]:
        # End-to-end: processor + model on a COCO fixture image with two Italian
        # captions; logits shapes and reference values are pinned (atol=1e-3).
        lowerCamelCase__ : Any =FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''', logit_scale_init_value=1.0 )
        lowerCamelCase__ : List[Any] =VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
        lowerCamelCase__ : Optional[int] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowerCamelCase__ : Dict =processor(
            text=['''una foto di un gatto''', '''una foto di un cane'''], images=lowerCamelCase, padding=lowerCamelCase, return_tensors='''np''' )
        lowerCamelCase__ : List[Any] =model(**lowerCamelCase )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
        lowerCamelCase__ : Any =np.array([[1.2_284_727, 0.3_104_122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image, lowerCamelCase, atol=1E-3 ) )
| 625 | 1 |
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
_lowercase : Dict = logging.get_logger(__name__)

# Config class name referenced by the shared T5 docstrings.
_lowercase : Any = "T5Config"


# NOTE(review): mT5's TF models reuse the T5 architectures wholesale — each class
# only overrides the model-type tag and config class. The obfuscation collapsed
# the distinct class names (presumably TFMT5Model / TFMT5ForConditionalGeneration /
# TFMT5EncoderModel) and the attribute names (model_type / config_class) onto the
# placeholders below — TODO confirm against upstream.
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    '''simple docstring'''

    _a = 'mt5'
    _a = MTaConfig


class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    '''simple docstring'''

    _a = 'mt5'
    _a = MTaConfig


class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    '''simple docstring'''

    _a = 'mt5'
    _a = MTaConfig
| 625 |
"""simple docstring"""
def snake_case__ (
    weights: list, values: list, number_of_items: int, max_weight: int, index: int
) -> int:
    """Solve the 0/1 knapsack problem recursively.

    Returns the maximum total value obtainable from items
    ``index .. number_of_items - 1`` whose combined weight does not exceed
    ``max_weight``.

    Fixes over the previous revision: all five parameters shared one
    (duplicated, hence invalid) name, and the recursive call targeted an
    undefined global ``knapsack`` instead of this function.

    :param weights: weight of each item
    :param values: value of each item (parallel to ``weights``)
    :param number_of_items: total number of items available
    :param max_weight: remaining weight capacity
    :param index: index of the item currently being considered

    >>> snake_case__([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    >>> snake_case__([3, 4, 5], [10, 9, 8], 3, 25, 0)
    27
    """
    # Base case: no items left to consider.
    if index == number_of_items:
        return 0
    # Option 1: skip the current item.
    ans_without_item = snake_case__(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item, if it still fits.
    ans_with_item = 0
    if weights[index] <= max_weight:
        ans_with_item = values[index] + snake_case__(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans_without_item, ans_with_item)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 625 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    '''simple docstring'''

    # Abstract base for CLI subcommands. NOTE(review): the obfuscated base
    # `lowerCAmelCase_` presumably stands for abc.ABC (imported above) — confirm.
    @staticmethod
    @abstractmethod
    def snake_case ( lowerCamelCase : ArgumentParser )-> int:
        # Register this command's subparser/arguments on the given parser.
        raise NotImplementedError()

    @abstractmethod
    def snake_case ( self : List[Any] )-> Optional[Any]:
        # Execute the command.
        raise NotImplementedError()
| 625 |
"""simple docstring"""
# Single source of truth for the library's dependency version pins
# (package name -> pip requirement specifier).
_lowercase : Optional[Any] = {
    "Pillow": "Pillow<10.0.0",
    "accelerate": "accelerate>=0.20.3",
    "av": "av==9.2.0",
    "beautifulsoup4": "beautifulsoup4",
    "black": "black~=23.1",
    "codecarbon": "codecarbon==1.2.0",
    "cookiecutter": "cookiecutter==1.7.3",
    "dataclasses": "dataclasses",
    "datasets": "datasets!=2.5.0",
    "decord": "decord==0.6.0",
    "deepspeed": "deepspeed>=0.9.3",
    "diffusers": "diffusers",
    "dill": "dill<0.3.5",
    "evaluate": "evaluate>=0.2.0",
    "fairscale": "fairscale>0.3",
    "faiss-cpu": "faiss-cpu",
    "fastapi": "fastapi",
    "filelock": "filelock",
    "flax": "flax>=0.4.1,<=0.7.0",
    "ftfy": "ftfy",
    "fugashi": "fugashi>=1.0",
    "GitPython": "GitPython<3.1.19",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
    "importlib_metadata": "importlib_metadata",
    "ipadic": "ipadic>=1.0.0,<2.0",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
    "jaxlib": "jaxlib>=0.1.65,<=0.4.13",
    "jieba": "jieba",
    "kenlm": "kenlm",
    "keras-nlp": "keras-nlp>=0.3.1",
    "librosa": "librosa",
    "nltk": "nltk",
    "natten": "natten>=0.14.6",
    "numpy": "numpy>=1.17",
    "onnxconverter-common": "onnxconverter-common",
    "onnxruntime-tools": "onnxruntime-tools>=1.4.2",
    "onnxruntime": "onnxruntime>=1.4.0",
    "opencv-python": "opencv-python",
    "optuna": "optuna",
    "optax": "optax>=0.0.8,<=0.1.4",
    "packaging": "packaging>=20.0",
    "parameterized": "parameterized",
    "phonemizer": "phonemizer",
    "protobuf": "protobuf",
    "psutil": "psutil",
    "pyyaml": "pyyaml>=5.1",
    "pydantic": "pydantic<2",
    "pytest": "pytest>=7.2.0",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "python": "python>=3.8.0",
    "ray[tune]": "ray[tune]",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "rhoknp": "rhoknp>=1.1.0,<1.3.1",
    "rjieba": "rjieba",
    "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
    "ruff": "ruff>=0.0.241,<=0.0.259",
    "sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
    "sacremoses": "sacremoses",
    "safetensors": "safetensors>=0.3.1",
    "sagemaker": "sagemaker>=2.31.0",
    "scikit-learn": "scikit-learn",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "sigopt": "sigopt",
    "starlette": "starlette",
    "sudachipy": "sudachipy>=0.6.6",
    "sudachidict_core": "sudachidict_core>=20220729",
    "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
    "tensorflow": "tensorflow>=2.6,<2.14",
    "tensorflow-text": "tensorflow-text<2.14",
    "tf2onnx": "tf2onnx",
    "timeout-decorator": "timeout-decorator",
    "timm": "timm",
    "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
    "torch": "torch>=1.9,!=1.12.0",
    "torchaudio": "torchaudio",
    "torchvision": "torchvision",
    "pyctcdecode": "pyctcdecode>=0.4.0",
    "tqdm": "tqdm>=4.27",
    "unidic": "unidic>=1.0.2",
    "unidic_lite": "unidic_lite>=1.0.7",
    "urllib3": "urllib3<2.0.0",
    "uvicorn": "uvicorn",
}
| 625 | 1 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def snake_case__ (
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
):
    """Find a root of ``function`` via Newton-Raphson iteration.

    ``function`` is a sympy-parsable expression in ``variable``;
    ``starting_point`` is the initial guess (may be complex).
    ``multiplicity`` accelerates convergence for roots of known multiplicity;
    iteration stops when consecutive guesses differ by less than ``precision``.

    Fixes over the previous revision: the five parameters shared one
    (duplicated, hence invalid) name; parameter identities were reconstructed
    from the body and the ``__main__`` demos. Defaults are unchanged.

    :raises ZeroDivisionError: when the derivative vanishes at a guess.
    """
    symbol = symbols(variable)
    func = lambdify(symbol, function)
    diff_function = lambdify(symbol, diff(function, symbol))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            # Newton step: x_{n+1} = x_n - m * f(x_n) / f'(x_n)
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError('''Could not find root''') from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess


# The __main__ demos below call the algorithm by its conventional name.
newton_raphson = snake_case__
# Let's Execute
# Demo/smoke runs when invoked as a script.
# NOTE(review): these calls target `newton_raphson`; confirm the function above
# is actually reachable under that name in this revision (it is defined as
# `snake_case__`), otherwise every line below raises NameError.
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
    # Find root of polynomial
    # Find fourth Root of 5
    print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}')
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f'{newton_raphson("log(y) - 1", 2, variable="y")}',
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f'{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}',
    )
    # Find root of cos(x)
    print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 625 |
"""simple docstring"""
def snake_case__ ( numbers: list[int] ) -> int:
    """Return the largest product over all contiguous subarrays of ``numbers``.

    Uses the running max/min technique: the minimum product so far can become
    the maximum after multiplying by a negative number, so both are tracked
    and swapped whenever a negative element is seen.

    Fixes over the previous revision: the obfuscation collapsed all locals onto
    one name, leaving ``min_till_now`` / ``max_till_now`` / ``number`` /
    ``max_prod`` undefined (NameError at runtime); logic reconstructed.

    :param numbers: list/tuple of integers (empty input returns 0)
    :raises ValueError: if ``numbers`` is not a list/tuple of integers

    >>> snake_case__([2, 3, -2, 4])
    6
    >>> snake_case__([-2, 0, -1])
    0
    """
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError('''numbers must be an iterable of integers''')

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            # a negative factor turns the smallest product into the largest
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
| 625 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : List[Any] = logging.get_logger(__name__)

# Canonical BridgeTower checkpoints mapped to their hosted config files.
_lowercase : str = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    """Configuration for the BridgeTower vision tower (ViT-style encoder).

    Parameter names were reconstructed from the attribute assignments of the
    previous revision, whose ``__init__`` reused a single name for every
    parameter (a SyntaxError); defaults are unchanged.
    """

    # model_type tag used by config serialization / the auto classes.
    _a = 'bridgetower_vision_model'

    def __init__(
        self,
        hidden_size : int = 768,
        num_hidden_layers : int = 12,
        num_channels : int = 3,
        patch_size : int = 16,
        image_size : int = 288,
        initializer_factor : float = 1,
        layer_norm_eps : float = 1E-05,
        stop_gradient : bool = False,
        share_layernorm : bool = True,
        remove_last_layer : bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def snake_case ( cls, pretrained_model_name_or_path : Union[str, os.PathLike], **kwargs )-> "PretrainedConfig":
        """Load this vision config from a checkpoint, descending into the nested
        vision section when pointed at a full ``bridgetower`` config."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs )
        if config_dict.get('''model_type''' ) == "bridgetower":
            # Bug fix: this is the *vision* sub-config, so read "vision_config"
            # (the previous revision copy-pasted "text_config" here).
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls, '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict, **kwargs )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    '''simple docstring'''

    _a = 'bridgetower_text_model'

    # NOTE(review): obfuscated — all __init__ parameters share the name
    # `lowerCamelCase` (duplicate-argument SyntaxError). Judging by the body
    # assignments they are RoBERTa-style text settings: vocab_size=50265,
    # hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
    # initializer_factor=1, intermediate_size=3072, hidden_act="gelu",
    # dropout probs=0.1, max_position_embeddings=514, type_vocab_size=1,
    # layer_norm_eps=1e-05, pad/bos/eos token ids=1/0/2,
    # position_embedding_type="absolute", use_cache=True — TODO confirm.
    def __init__( self : List[str], lowerCamelCase : Union[str, Any]=5_0265, lowerCamelCase : Dict=768, lowerCamelCase : Any=12, lowerCamelCase : Any=12, lowerCamelCase : Optional[int]=1, lowerCamelCase : int=3072, lowerCamelCase : Optional[int]="gelu", lowerCamelCase : Union[str, Any]=0.1, lowerCamelCase : List[str]=0.1, lowerCamelCase : Dict=514, lowerCamelCase : Tuple=1, lowerCamelCase : Union[str, Any]=1E-05, lowerCamelCase : Dict=1, lowerCamelCase : Optional[int]=0, lowerCamelCase : str=2, lowerCamelCase : Any="absolute", lowerCamelCase : List[str]=True, **lowerCamelCase : Optional[Any], )-> str:
        super().__init__(**lowerCamelCase )
        lowerCamelCase__ : Optional[int] =vocab_size
        lowerCamelCase__ : List[str] =hidden_size
        lowerCamelCase__ : Any =num_hidden_layers
        lowerCamelCase__ : Tuple =num_attention_heads
        lowerCamelCase__ : List[str] =hidden_act
        lowerCamelCase__ : Any =initializer_factor
        lowerCamelCase__ : int =intermediate_size
        lowerCamelCase__ : Tuple =hidden_dropout_prob
        lowerCamelCase__ : int =attention_probs_dropout_prob
        lowerCamelCase__ : Optional[Any] =max_position_embeddings
        lowerCamelCase__ : Optional[Any] =type_vocab_size
        lowerCamelCase__ : Union[str, Any] =layer_norm_eps
        lowerCamelCase__ : List[Any] =position_embedding_type
        lowerCamelCase__ : Union[str, Any] =use_cache
        lowerCamelCase__ : List[str] =pad_token_id
        lowerCamelCase__ : int =bos_token_id
        lowerCamelCase__ : Optional[Any] =eos_token_id

    @classmethod
    def snake_case ( cls : int, lowerCamelCase : Union[str, os.PathLike], **lowerCamelCase : int )-> "PretrainedConfig":
        # Load this text config from a checkpoint; when pointed at a full
        # bridgetower config, descend into its nested "text_config" section.
        lowerCamelCase__ , lowerCamelCase__ : int =cls.get_config_dict(lowerCamelCase, **lowerCamelCase )
        if config_dict.get('''model_type''' ) == "bridgetower":
            lowerCamelCase__ : Any =config_dict['''text_config''']
        if "model_type" in config_dict and hasattr(cls, '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(lowerCamelCase, **lowerCamelCase )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    '''simple docstring'''

    _a = 'bridgetower'

    # NOTE(review): obfuscated — all __init__ parameters share the name
    # `lowerCamelCase` (duplicate-argument SyntaxError). Judging by the body
    # assignments they are: share_cross_modal_transformer_layers=True,
    # hidden_act="gelu", hidden_size=768, initializer_factor=1,
    # layer_norm_eps=1e-05, share_link_tower_layers=False,
    # link_tower_type="add", num_attention_heads=12, num_hidden_layers=6,
    # tie_word_embeddings=False, init_layernorm_from_vision_encoder=False,
    # text_config=None, vision_config=None — TODO confirm.
    def __init__( self : str, lowerCamelCase : str=True, lowerCamelCase : str="gelu", lowerCamelCase : Tuple=768, lowerCamelCase : Dict=1, lowerCamelCase : Any=1E-05, lowerCamelCase : List[Any]=False, lowerCamelCase : List[Any]="add", lowerCamelCase : Any=12, lowerCamelCase : int=6, lowerCamelCase : Any=False, lowerCamelCase : Union[str, Any]=False, lowerCamelCase : Optional[int]=None, lowerCamelCase : int=None, **lowerCamelCase : List[str], )-> Union[str, Any]:
        # TODO: remove this once the Hub files are updated.
        lowerCamelCase__ : Any =kwargs.pop('''text_config_dict''', lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =kwargs.pop('''vision_config_dict''', lowerCamelCase )
        super().__init__(**lowerCamelCase )
        lowerCamelCase__ : List[Any] =share_cross_modal_transformer_layers
        lowerCamelCase__ : Optional[int] =hidden_act
        lowerCamelCase__ : Optional[Any] =hidden_size
        lowerCamelCase__ : Optional[Any] =initializer_factor
        lowerCamelCase__ : Tuple =layer_norm_eps
        lowerCamelCase__ : Tuple =share_link_tower_layers
        lowerCamelCase__ : Optional[int] =link_tower_type
        lowerCamelCase__ : Dict =num_attention_heads
        lowerCamelCase__ : List[Any] =num_hidden_layers
        lowerCamelCase__ : List[str] =tie_word_embeddings
        lowerCamelCase__ : Optional[Any] =init_layernorm_from_vision_encoder
        # Fall back to default sub-configs when none are supplied.
        if text_config is None:
            lowerCamelCase__ : str ={}
            logger.info('''`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.''' )
        if vision_config is None:
            lowerCamelCase__ : Tuple ={}
            logger.info('''`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.''' )
        lowerCamelCase__ : int =BridgeTowerTextConfig(**lowerCamelCase )
        lowerCamelCase__ : int =BridgeTowerVisionConfig(**lowerCamelCase )

    @classmethod
    def snake_case ( cls : Optional[int], lowerCamelCase : BridgeTowerTextConfig, lowerCamelCase : BridgeTowerVisionConfig, **lowerCamelCase : Dict )-> Optional[Any]:
        # Alternate constructor: build the composite config from its two
        # sub-configs.
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **lowerCamelCase )

    def snake_case ( self : Union[str, Any] )-> Optional[int]:
        # Serialize to a plain dict, expanding the nested sub-configs and
        # recording this class's model_type.
        lowerCamelCase__ : Any =copy.deepcopy(self.__dict__ )
        lowerCamelCase__ : int =self.text_config.to_dict()
        lowerCamelCase__ : List[str] =self.vision_config.to_dict()
        lowerCamelCase__ : Optional[int] =self.__class__.model_type
        return output
| 625 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    '''simple docstring'''

    # Output container for the Flax ControlNet forward pass.
    # NOTE(review): the obfuscation collapsed both field names onto `_a`;
    # presumably the per-down-block residual samples and the mid-block residual
    # sample — TODO confirm against upstream.
    _a = 42
    _a = 42
class __SCREAMING_SNAKE_CASE ( nn.Module ):
    '''simple docstring'''

    # Embeds the conditioning image (e.g. an edge/pose map) into the UNet's
    # feature space: conv-in, a stack of conv + stride-2 downsampling conv
    # pairs, and a zero-initialized conv-out.
    # NOTE(review): the obfuscation collapsed the dataclass field names onto
    # `_a`; presumably conditioning_embedding_channels, block_out_channels and
    # dtype — TODO confirm.
    _a = 42
    _a = (1_6, 3_2, 9_6, 2_5_6)
    _a = jnp.floataa

    def snake_case ( self : Tuple )-> int:
        # Flax `setup`: build conv_in, the downsampling `blocks`, and the
        # zero-init conv_out (so the embedding starts as a no-op residual).
        lowerCamelCase__ : Tuple =nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        lowerCamelCase__ : Dict =[]
        for i in range(len(self.block_out_channels ) - 1 ):
            lowerCamelCase__ : Dict =self.block_out_channels[i]
            lowerCamelCase__ : Dict =self.block_out_channels[i + 1]
            # same-resolution conv followed by a stride-2 downsampling conv
            lowerCamelCase__ : List[str] =nn.Conv(
                lowerCamelCase, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(lowerCamelCase )
            lowerCamelCase__ : Optional[int] =nn.Conv(
                lowerCamelCase, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(lowerCamelCase )
        lowerCamelCase__ : Any =blocks
        lowerCamelCase__ : Optional[int] =nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )

    def __call__( self : Any, lowerCamelCase : int )-> List[str]:
        # conv-in -> SiLU -> each downsampling block (SiLU after each) -> conv-out
        lowerCamelCase__ : Tuple =self.conv_in(lowerCamelCase )
        lowerCamelCase__ : Dict =nn.silu(lowerCamelCase )
        for block in self.blocks:
            lowerCamelCase__ : str =block(lowerCamelCase )
            lowerCamelCase__ : List[str] =nn.silu(lowerCamelCase )
        lowerCamelCase__ : Any =self.conv_out(lowerCamelCase )
        return embedding
@flax_register_to_config
class __SCREAMING_SNAKE_CASE ( nn.Module , lowerCAmelCase_ , lowerCAmelCase_ ):
    '''simple docstring'''

    # Flax ControlNet model (nn.Module mixed with, presumably, FlaxModelMixin
    # and ConfigMixin via the obfuscated bases — TODO confirm).
    # NOTE(review): every hyperparameter field name was collapsed onto `_a`;
    # judging by the values and by how `setup` reads `self.*`, they are, in
    # order: sample_size=32, in_channels=4, down_block_types,
    # only_cross_attention=False, block_out_channels=(320, 640, 1280, 1280),
    # layers_per_block=2, attention_head_dim=8, num_attention_heads=None,
    # cross_attention_dim=1280, dropout=0.0, use_linear_projection=False,
    # dtype=jnp.float32, flip_sin_to_cos=True, freq_shift=0,
    # controlnet_conditioning_channel_order="rgb",
    # conditioning_embedding_out_channels=(16, 32, 96, 256) — TODO confirm.
    _a = 3_2
    _a = 4
    _a = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    _a = False
    _a = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
    _a = 2
    _a = 8
    _a = None
    _a = 1_2_8_0
    _a = 0.0
    _a = False
    _a = jnp.floataa
    _a = True
    _a = 0
    _a = "rgb"
    _a = (1_6, 3_2, 9_6, 2_5_6)
    def snake_case ( self : str, lowerCamelCase : jax.random.KeyArray )-> FrozenDict:
        # Initialize and return the model parameters by tracing a dummy forward
        # pass: zero latent sample + zero encoder states, ones timestep, and a
        # zero conditioning image at 8x the latent resolution.
        # init input tensors
        lowerCamelCase__ : int =(1, self.in_channels, self.sample_size, self.sample_size)
        lowerCamelCase__ : int =jnp.zeros(lowerCamelCase, dtype=jnp.floataa )
        lowerCamelCase__ : Union[str, Any] =jnp.ones((1,), dtype=jnp.intaa )
        lowerCamelCase__ : str =jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa )
        lowerCamelCase__ : Any =(1, 3, self.sample_size * 8, self.sample_size * 8)
        lowerCamelCase__ : Optional[Any] =jnp.zeros(lowerCamelCase, dtype=jnp.floataa )
        # separate RNG streams for parameter init and dropout
        lowerCamelCase__ , lowerCamelCase__ : List[Any] =jax.random.split(lowerCamelCase )
        lowerCamelCase__ : Dict ={'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )["params"]
    def snake_case ( self : Any )-> Tuple:
        # Flax `setup`: builds the conv-in, time embedding, conditioning-image
        # embedding, the down blocks with their zero-initialized 1x1 ControlNet
        # projection convs, and the mid block with its projection conv.
        lowerCamelCase__ : Optional[int] =self.block_out_channels
        lowerCamelCase__ : Tuple =block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        lowerCamelCase__ : List[Any] =self.num_attention_heads or self.attention_head_dim
        # input
        lowerCamelCase__ : int =nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        # time
        lowerCamelCase__ : str =FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
        lowerCamelCase__ : Dict =FlaxTimestepEmbedding(lowerCamelCase, dtype=self.dtype )
        lowerCamelCase__ : List[Any] =FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )
        # broadcast scalar settings to one entry per down block
        lowerCamelCase__ : Dict =self.only_cross_attention
        if isinstance(lowerCamelCase, lowerCamelCase ):
            lowerCamelCase__ : int =(only_cross_attention,) * len(self.down_block_types )
        if isinstance(lowerCamelCase, lowerCamelCase ):
            lowerCamelCase__ : List[str] =(num_attention_heads,) * len(self.down_block_types )
        # down
        lowerCamelCase__ : Union[str, Any] =[]
        lowerCamelCase__ : Dict =[]
        lowerCamelCase__ : List[Any] =block_out_channels[0]
        # zero-initialized 1x1 projection so the ControlNet starts as a no-op
        lowerCamelCase__ : List[Any] =nn.Conv(
            lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
        controlnet_down_blocks.append(lowerCamelCase )
        for i, down_block_type in enumerate(self.down_block_types ):
            lowerCamelCase__ : List[Any] =output_channel
            lowerCamelCase__ : str =block_out_channels[i]
            lowerCamelCase__ : Dict =i == len(lowerCamelCase ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                lowerCamelCase__ : str =FlaxCrossAttnDownBlockaD(
                    in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
            else:
                lowerCamelCase__ : List[Any] =FlaxDownBlockaD(
                    in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
            down_blocks.append(lowerCamelCase )
            # one zero-init projection conv per resnet layer in the block
            for _ in range(self.layers_per_block ):
                lowerCamelCase__ : Any =nn.Conv(
                    lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(lowerCamelCase )
            # plus one for the downsampler, except on the final block
            if not is_final_block:
                lowerCamelCase__ : Any =nn.Conv(
                    lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(lowerCamelCase )
        lowerCamelCase__ : int =down_blocks
        lowerCamelCase__ : List[str] =controlnet_down_blocks
        # mid
        lowerCamelCase__ : Tuple =block_out_channels[-1]
        lowerCamelCase__ : List[Any] =FlaxUNetMidBlockaDCrossAttn(
            in_channels=lowerCamelCase, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )
        lowerCamelCase__ : List[str] =nn.Conv(
            lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
    def __call__( self : int, lowerCamelCase : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : str, lowerCamelCase : float = 1.0, lowerCamelCase : bool = True, lowerCamelCase : bool = False, )-> Union[FlaxControlNetOutput, Tuple]:
        # NOTE(review): every parameter is (mis)named ``lowerCamelCase`` -- a
        # duplicate-argument SyntaxError -- and several assignments below target
        # throwaway locals while later lines read the intended names
        # (``channel_order``, ``timesteps``, ``sample`` ...).  From the body the
        # intended signature is (sample, timesteps, encoder_hidden_states,
        # controlnet_cond, conditioning_scale=1.0, return_dict=True, train=False)
        # -- confirm against upstream diffusers FlaxControlNetModel before use.
        # (Some tuple targets also carry annotations, which is itself invalid.)
        lowerCamelCase__ : int =self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            # Conditioning image arrives as BGR: flip the channel axis.
            lowerCamelCase__ : int =jnp.flip(lowerCamelCase, axis=1 )
        # 1. time: normalise ``timesteps`` to a 1-D array, then embed it.
        if not isinstance(lowerCamelCase, jnp.ndarray ):
            lowerCamelCase__ : Any =jnp.array([timesteps], dtype=jnp.intaa )
        elif isinstance(lowerCamelCase, jnp.ndarray ) and len(timesteps.shape ) == 0:
            # Promote a 0-d timestep to a 1-element batch.
            lowerCamelCase__ : List[str] =timesteps.astype(dtype=jnp.floataa )
            lowerCamelCase__ : int =jnp.expand_dims(lowerCamelCase, 0 )
        lowerCamelCase__ : Optional[Any] =self.time_proj(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =self.time_embedding(lowerCamelCase )
        # 2. pre-process: transpose to NHWC, as Flax convolutions expect channels last.
        lowerCamelCase__ : Optional[int] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) )
        lowerCamelCase__ : Dict =self.conv_in(lowerCamelCase )
        lowerCamelCase__ : List[str] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) )
        lowerCamelCase__ : int =self.controlnet_cond_embedding(lowerCamelCase )
        # Inject the embedded conditioning signal into the latent sample.
        sample += controlnet_cond
        # 3. down: run the encoder blocks, collecting every residual sample.
        lowerCamelCase__ : Union[str, Any] =(sample,)
        for down_block in self.down_blocks:
            if isinstance(lowerCamelCase, lowerCamelCase ):
                # Cross-attention blocks additionally consume the text encodings.
                lowerCamelCase__ , lowerCamelCase__ : Dict =down_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train )
            else:
                lowerCamelCase__ , lowerCamelCase__ : Tuple =down_block(lowerCamelCase, lowerCamelCase, deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        lowerCamelCase__ : Optional[int] =self.mid_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train )
        # 5. controlnet blocks: project each residual through its zero-init 1x1 conv.
        lowerCamelCase__ : Optional[Any] =()
        for down_block_res_sample, controlnet_block in zip(lowerCamelCase, self.controlnet_down_blocks ):
            lowerCamelCase__ : Union[str, Any] =controlnet_block(lowerCamelCase )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        lowerCamelCase__ : List[str] =controlnet_down_block_res_samples
        lowerCamelCase__ : List[str] =self.controlnet_mid_block(lowerCamelCase )
        # 6. scaling: weight every residual by ``conditioning_scale``.
        lowerCamelCase__ : Union[str, Any] =[sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=lowerCamelCase, mid_block_res_sample=lowerCamelCase )
| 625 | 1 |
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def snake_case__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : List[str] ):
    """Merge a ``.safetensors`` LoRA checkpoint into a Stable Diffusion pipeline.

    NOTE(review): all five parameters are (mis)named ``__lowerCamelCase`` -- a
    duplicate-argument SyntaxError.  From the call at the bottom of the file
    the intended signature is (base_model_path, checkpoint_path,
    lora_prefix_unet, lora_prefix_text_encoder, alpha); the body also reads
    ``LORA_PREFIX_TEXT_ENCODER``/``LORA_PREFIX_UNET``/``alpha`` and other
    names whose assignments were mangled to throwaway locals -- confirm
    against the upstream diffusers conversion script before relying on this.
    """
    # load base model
    lowerCamelCase__ : Tuple =StableDiffusionPipeline.from_pretrained(__lowerCamelCase , torch_dtype=torch.floataa )
    # load LoRA weight from .safetensors
    lowerCamelCase__ : Tuple =load_file(__lowerCamelCase )
    lowerCamelCase__ : Optional[int] =[]
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            # Text-encoder LoRA weights carry the LORA_PREFIX_TEXT_ENCODER prefix.
            lowerCamelCase__ : List[str] =key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' )
            lowerCamelCase__ : Optional[Any] =pipeline.text_encoder
        else:
            lowerCamelCase__ : Dict =key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' )
            lowerCamelCase__ : Any =pipeline.unet
        # find the target layer by walking the attribute path piece by piece
        lowerCamelCase__ : List[Any] =layer_infos.pop(0 )
        while len(__lowerCamelCase ) > -1:
            try:
                lowerCamelCase__ : Optional[Any] =curr_layer.__getattr__(__lowerCamelCase )
                if len(__lowerCamelCase ) > 0:
                    lowerCamelCase__ : int =layer_infos.pop(0 )
                elif len(__lowerCamelCase ) == 0:
                    break
            except Exception:
                # Attribute not found yet: the layer name itself contains
                # underscores, so glue the next path piece on and retry.
                if len(__lowerCamelCase ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    lowerCamelCase__ : Tuple =layer_infos.pop(0 )
        lowerCamelCase__ : Tuple =[]
        # Pair the matching keys: pair_keys[0] is lora_up, pair_keys[1] is lora_down.
        if "lora_down" in key:
            pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) )
            pair_keys.append(__lowerCamelCase )
        else:
            pair_keys.append(__lowerCamelCase )
            pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) )
        # update weight: W += alpha * (up @ down)
        if len(state_dict[pair_keys[0]].shape ) == 4:
            # Conv weights: squeeze the trailing 1x1 dims before the matmul.
            lowerCamelCase__ : List[Any] =state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
            lowerCamelCase__ : Dict =state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
            curr_layer.weight.data += alpha * torch.mm(__lowerCamelCase , __lowerCamelCase ).unsqueeze(2 ).unsqueeze(3 )
        else:
            lowerCamelCase__ : str =state_dict[pair_keys[0]].to(torch.floataa )
            lowerCamelCase__ : Union[str, Any] =state_dict[pair_keys[1]].to(torch.floataa )
            curr_layer.weight.data += alpha * torch.mm(__lowerCamelCase , __lowerCamelCase )
        # update visited list so the partner key of each pair is skipped later
        for item in pair_keys:
            visited.append(__lowerCamelCase )
    return pipeline
# CLI entry point: merge a LoRA checkpoint into a base pipeline and save it.
# NOTE(review): every assignment below targets ``_lowercase`` while later
# statements read ``parser``/``args``/``pipe`` and call ``convert`` (the
# function above is named ``snake_case__``) -- as written this script raises
# NameError; the mangled names must be reconciled before it can run.
if __name__ == "__main__":
    _lowercase : int = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    _lowercase : int = parser.parse_args()
    _lowercase : Tuple = args.base_model_path
    _lowercase : Dict = args.checkpoint_path
    _lowercase : Any = args.dump_path
    _lowercase : str = args.lora_prefix_unet
    _lowercase : Union[str, Any] = args.lora_prefix_text_encoder
    _lowercase : Dict = args.alpha
    _lowercase : Any = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    _lowercase : int = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 625 |
"""Lazy import machinery for the CLIP model family.

Heavy optional backends (tokenizers, vision, torch, tensorflow, flax) are only
imported on first attribute access via ``_LazyModule``; at type-checking time
everything is imported eagerly so static analysis sees the real symbols.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

# Base import map: submodule name -> list of public names it provides.
_lowercase : Optional[Any] = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

# NOTE(review): each optional-backend branch below rebinds ``_lowercase`` to a
# bare list instead of extending the ``_import_structure`` dict that
# ``_LazyModule`` consumes at the bottom of the file -- the mangled names must
# be reconciled before the lazy module can work.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : str = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : Any = ["CLIPFeatureExtractor"]
    _lowercase : int = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : Optional[Any] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : Dict = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : Union[str, Any] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]

# Static type checkers follow this eager branch; at runtime the lazy module
# in the ``else`` branch is installed instead.
if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    # NOTE(review): ``_LazyModule`` is given ``_import_structure``, but the
    # dict above was bound to ``_lowercase`` -- NameError as written.
    _lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 625 | 1 |
"""simple docstring"""
from __future__ import annotations
import requests
# Post fields accepted by the ``wanted_data`` filter below.  The literal
# ``\n`` escapes are cosmetic only: ``.split()`` breaks on any whitespace.
_lowercase : Any = set(
    "approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
# Bug fix: the fetch function below validates search terms against
# ``valid_terms``, but the set was only ever bound to the mangled name
# ``_lowercase``; alias it so the lookup resolves (backward compatible --
# ``_lowercase`` is kept).
valid_terms = _lowercase
def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : int = 1 , __lowerCamelCase : str = "new" , __lowerCamelCase : list | None = None ):
    """Fetch posts of a subreddit through Reddit's public JSON endpoint.

    NOTE(review): all four parameters are (mis)named ``__lowerCamelCase``
    (duplicate-argument SyntaxError); from the body the intended signature is
    (subreddit, limit=1, age="new", wanted_data=None).  Assignment targets
    were likewise mangled while later lines read ``response``/``data`` etc.
    """
    lowerCamelCase__ : Union[str, Any] =wanted_data or []
    # Reject any requested field that is not a known post attribute.
    if invalid_search_terms := ", ".join(sorted(set(__lowerCamelCase ) - valid_terms ) ):
        lowerCamelCase__ : Optional[Any] =f'''Invalid search term: {invalid_search_terms}'''
        raise ValueError(__lowerCamelCase )
    lowerCamelCase__ : Union[str, Any] =requests.get(
        f'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''' , headers={'''User-agent''': '''A random string'''} , )
    if response.status_code == 429:
        # Reddit rate-limits unauthenticated clients aggressively.
        raise requests.HTTPError
    lowerCamelCase__ : List[str] =response.json()
    if not wanted_data:
        # No field filter requested: return the raw children keyed by position.
        return {id_: data["data"]["children"][id_] for id_ in range(__lowerCamelCase )}
    lowerCamelCase__ : List[Any] ={}
    for id_ in range(__lowerCamelCase ):
        lowerCamelCase__ : List[str] ={
            item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data
        }
    return data_dict


# NOTE(review): calls ``get_subreddit_data`` but the function above is named
# ``snake_case__`` -- NameError as written.
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited.Try after some time
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
| 625 |
"""simple docstring"""
import os
def snake_case__ ( ):
"""simple docstring"""
with open(os.path.dirname(__lowerCamelCase ) + '''/p022_names.txt''' ) as file:
lowerCamelCase__ : Tuple =str(file.readlines()[0] )
lowerCamelCase__ : int =names.replace('''"''' , '''''' ).split(''',''' )
names.sort()
lowerCamelCase__ : Union[str, Any] =0
lowerCamelCase__ : str =0
for i, name in enumerate(__lowerCamelCase ):
for letter in name:
name_score += ord(__lowerCamelCase ) - 64
total_score += (i + 1) * name_score
lowerCamelCase__ : Dict =0
return total_score
if __name__ == "__main__":
print(solution())
| 625 | 1 |
"""simple docstring"""
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    """Dataset input stream that materialises a dataset from a Python generator.

    NOTE(review): the base class ``lowerCAmelCase_`` and the repeated
    ``lowerCamelCase`` parameter names are mangled identifiers (duplicate
    parameters are a SyntaxError).  Judging from the ``super().__init__``
    keywords the intended signature is (generator, features=None,
    cache_dir=None, keep_in_memory=False, streaming=False, gen_kwargs=None,
    num_proc=None, **kwargs) -- confirm against the upstream ``datasets``
    implementation before use.
    """

    def __init__( self : List[str], lowerCamelCase : Callable, lowerCamelCase : Optional[Features] = None, lowerCamelCase : str = None, lowerCamelCase : bool = False, lowerCamelCase : bool = False, lowerCamelCase : Optional[dict] = None, lowerCamelCase : Optional[int] = None, **lowerCamelCase : Optional[int], )-> Optional[int]:
        super().__init__(
            features=lowerCamelCase, cache_dir=lowerCamelCase, keep_in_memory=lowerCamelCase, streaming=lowerCamelCase, num_proc=lowerCamelCase, **lowerCamelCase, )
        # Builder backing this stream; prepared lazily in ``snake_case`` below.
        lowerCamelCase__ : Union[str, Any] =Generator(
            cache_dir=lowerCamelCase, features=lowerCamelCase, generator=lowerCamelCase, gen_kwargs=lowerCamelCase, **lowerCamelCase, )

    def snake_case ( self : Union[str, Any] )-> Optional[int]:
        """Build and return the dataset (streaming or fully prepared on disk)."""
        # Build iterable dataset
        if self.streaming:
            lowerCamelCase__ : Any =self.builder.as_streaming_dataset(split='''train''' )
        # Build regular (map-style) dataset
        else:
            lowerCamelCase__ : List[str] =None
            lowerCamelCase__ : List[str] =None
            lowerCamelCase__ : Dict =None
            lowerCamelCase__ : Optional[Any] =None
            self.builder.download_and_prepare(
                download_config=lowerCamelCase, download_mode=lowerCamelCase, verification_mode=lowerCamelCase, base_path=lowerCamelCase, num_proc=self.num_proc, )
            lowerCamelCase__ : Dict =self.builder.as_dataset(
                split='''train''', verification_mode=lowerCamelCase, in_memory=self.keep_in_memory )
        return dataset
| 625 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class __SCREAMING_SNAKE_CASE :
    """Binary-tree node holding an ``int`` payload and two child links."""

    def __init__( self : str, lowerCamelCase : int )-> None:
        # Bug fix: the previous revision assigned to throwaway locals (and read
        # an undefined ``value``), so instances never got any attributes and
        # construction raised NameError.
        self.value = lowerCamelCase
        self.left = None  # type: Node | None  -- left child, None for a leaf
        self.right = None  # type: Node | None -- right child, None for a leaf
class __SCREAMING_SNAKE_CASE :
    """Sums every node value of a binary tree; iterating yields that total once."""

    def __init__( self : int, lowerCamelCase : Node )-> None:
        # Bug fix: the previous revision assigned an undefined ``tree`` to a
        # throwaway local, so ``__iter__`` could never find the root.
        self.tree = lowerCamelCase

    def depth_first_search( self : str, lowerCamelCase : Node | None )-> int:
        """Return the sum of all values reachable from ``lowerCamelCase``."""
        # Bug fix: the recursive calls (and __iter__) use ``depth_first_search``
        # but the method had been renamed ``snake_case``; the body also read an
        # undefined ``node`` instead of its parameter.
        if lowerCamelCase is None:
            return 0
        return lowerCamelCase.value + (
            self.depth_first_search(lowerCamelCase.left ) + self.depth_first_search(lowerCamelCase.right )
        )

    # Backward-compatible alias for any caller that used the old name.
    snake_case = depth_first_search

    def __iter__( self : Dict )-> Iterator[int]:
        yield self.depth_first_search(self.tree )
if __name__ == "__main__":
    # Run any doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 625 | 1 |
"""Polynomial regression demo on the 'position salaries' sample dataset."""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# NOTE(review): every assignment below is bound to the mangled ``_lowercase``
# while later statements read ``dataset``/``X``/``y``/``poly_reg``/``pol_reg``
# -- as written this script raises NameError on import (and the annotated
# tuple target is itself a SyntaxError).  Reconcile the names before running.

# Importing the dataset
_lowercase : List[str] = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
_lowercase : int = dataset.iloc[:, 1:2].values
_lowercase : str = dataset.iloc[:, 2].values

_lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = train_test_split(X, y, test_size=0.2, random_state=0)

_lowercase : Optional[Any] = PolynomialFeatures(degree=4)
_lowercase : List[Any] = poly_reg.fit_transform(X)
_lowercase : List[Any] = LinearRegression()
pol_reg.fit(X_poly, y)


def snake_case__ ( ):
    """Scatter the raw data and overlay the fitted polynomial curve.

    NOTE(review): the body reads ``__lowerCamelCase``, undefined here --
    presumably the mangled remains of the module-level ``X``/``y``.
    """
    plt.scatter(__lowerCamelCase , __lowerCamelCase , color='''red''' )
    plt.plot(__lowerCamelCase , pol_reg.predict(poly_reg.fit_transform(__lowerCamelCase ) ) , color='''blue''' )
    plt.title('''Truth or Bluff (Linear Regression)''' )
    plt.xlabel('''Position level''' )
    plt.ylabel('''Salary''' )
    plt.show()


if __name__ == "__main__":
    # NOTE(review): calls ``viz_polymonial`` but the plotting function above is
    # named ``snake_case__`` -- NameError as written.
    viz_polymonial()

    # Predicting a new result with Polymonial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
| 625 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
_lowercase : List[str] = logging.getLogger(__name__)
def snake_case__ ( __lowerCamelCase : Any , __lowerCamelCase : str ):
    """Save a model checkpoint into a directory, clearing stale files first.

    NOTE(review): both parameters are (mis)named ``__lowerCamelCase``
    (duplicate-argument SyntaxError); the intended signature is
    (model, dirpath) -- the body also reads ``model`` directly.
    """
    # save results
    if os.path.exists(__lowerCamelCase ):
        # Remove a previous config.json / pytorch_model.bin so
        # ``save_pretrained`` writes a clean checkpoint.
        if os.path.exists(os.path.join(__lowerCamelCase , '''config.json''' ) ) and os.path.isfile(
            os.path.join(__lowerCamelCase , '''config.json''' ) ):
            os.remove(os.path.join(__lowerCamelCase , '''config.json''' ) )
        if os.path.exists(os.path.join(__lowerCamelCase , '''pytorch_model.bin''' ) ) and os.path.isfile(
            os.path.join(__lowerCamelCase , '''pytorch_model.bin''' ) ):
            os.remove(os.path.join(__lowerCamelCase , '''pytorch_model.bin''' ) )
    else:
        os.makedirs(__lowerCamelCase )
    model.save_pretrained(__lowerCamelCase )
def snake_case__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Dict=False ):
    """Shannon entropy of a distribution along the last dimension.

    NOTE(review): both parameters are (mis)named ``__lowerCamelCase``
    (duplicate-argument SyntaxError); from the bertology-style body the
    intended signature is (p, unlogit=False).  The assignments target
    throwaway locals while later lines read ``p``/``plogp`` -- the original
    presumably also zeroed ``plogp`` where ``p == 0``; confirm upstream.
    """
    lowerCamelCase__ : Union[str, Any] =2
    if unlogit:
        # Square the raw scores first when they are not already probabilities.
        lowerCamelCase__ : Any =torch.pow(__lowerCamelCase , __lowerCamelCase )
    lowerCamelCase__ : List[str] =p * torch.log(__lowerCamelCase )
    lowerCamelCase__ : Tuple =0
    return -plogp.sum(dim=-1 )
def snake_case__ ( __lowerCamelCase : Any ):
    """Log a 2-D tensor as one tab-separated row per layer.

    Bug fix: the body read an undefined global ``tensor`` instead of the
    ``__lowerCamelCase`` parameter it was given, raising NameError on every
    call.  Float tensors are formatted with five decimals; integer tensors
    (e.g. head ranks) as plain decimals.
    """
    # Header row: 1-based head indices.
    logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(__lowerCamelCase ) ) ) )
    for row in range(len(__lowerCamelCase ) ):
        if __lowerCamelCase.dtype != torch.long:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in __lowerCamelCase[row].cpu().data ) )
        else:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in __lowerCamelCase[row].cpu().data ) )


# Callers elsewhere in this module use the original (pre-mangling) name.
print_ad_tensor = snake_case__
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : List[str]=None , __lowerCamelCase : Tuple=False ):
    """Compute per-head attention entropy and gradient-based importance scores.

    NOTE(review): the seven parameters are all (mis)named ``__lowerCamelCase``
    (duplicate-argument SyntaxError); from the body and the call sites the
    intended signature is (args, model, eval_dataloader, compute_entropy=True,
    compute_importance=True, head_mask=None, actually_pruned=False).  The
    assignment targets were likewise mangled to throwaway locals, and the
    annotated tuple targets are themselves invalid syntax -- reconcile with
    the upstream bertology script before use.
    """
    # Accumulators shaped (num_layers, num_heads), on the training device.
    lowerCamelCase__ , lowerCamelCase__ : Tuple =model.config.num_hidden_layers, model.config.num_attention_heads
    lowerCamelCase__ : Optional[Any] =torch.zeros(__lowerCamelCase , __lowerCamelCase ).to(args.device )
    lowerCamelCase__ : Optional[Any] =torch.zeros(__lowerCamelCase , __lowerCamelCase ).to(args.device )
    if head_mask is None:
        lowerCamelCase__ : List[Any] =torch.ones(__lowerCamelCase , __lowerCamelCase ).to(args.device )
    head_mask.requires_grad_(requires_grad=__lowerCamelCase )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        lowerCamelCase__ : Union[str, Any] =None
    lowerCamelCase__ : List[str] =0.0
    lowerCamelCase__ : Union[str, Any] =0.0
    for step, inputs in enumerate(tqdm(__lowerCamelCase , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
        lowerCamelCase__ : Any =tuple(t.to(args.device ) for t in inputs )
        ((lowerCamelCase__) , ) : Any =inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        lowerCamelCase__ : Dict =model(__lowerCamelCase , labels=__lowerCamelCase , head_mask=__lowerCamelCase )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =(
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(__lowerCamelCase ):
                lowerCamelCase__ : Any =entropy(attn.detach() , __lowerCamelCase )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            # Importance of a head ~ magnitude of the loss gradient w.r.t. its mask.
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(__lowerCamelCase ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        lowerCamelCase__ : int =2
        lowerCamelCase__ : List[str] =torch.pow(torch.pow(__lowerCamelCase , __lowerCamelCase ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-2_0
    if not args.dont_normalize_global_importance:
        # Rescale importances into [0, 1] across the whole model.
        lowerCamelCase__ : int =(head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info('''Attention entropies''' )
        print_ad_tensor(__lowerCamelCase )
    if compute_importance:
        logger.info('''Head importance scores''' )
        print_ad_tensor(__lowerCamelCase )
    logger.info('''Head ranked by importance scores''' )
    lowerCamelCase__ : Optional[int] =torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    lowerCamelCase__ : Dict =torch.arange(
        head_importance.numel() , device=args.device )
    lowerCamelCase__ : Any =head_ranks.view_as(__lowerCamelCase )
    print_ad_tensor(__lowerCamelCase )
    return attn_entropy, head_importance, total_loss
def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : int ):
    """Iteratively mask the least-important heads until quality drops below threshold.

    NOTE(review): parameters are mangled duplicates (SyntaxError); the intended
    signature is (args, model, eval_dataloader).  Assignment targets below are
    throwaway locals while later lines read the intended names.
    """
    lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =compute_heads_importance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase )
    lowerCamelCase__ : int =1 / loss  # instead of downsteam score use the LM loss
    logger.info('''Pruning: original score: %f, threshold: %f''' , __lowerCamelCase , original_score * args.masking_threshold )
    lowerCamelCase__ : Dict =torch.ones_like(__lowerCamelCase )
    lowerCamelCase__ : Union[str, Any] =max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    lowerCamelCase__ : List[Any] =original_score
    while current_score >= original_score * args.masking_threshold:
        lowerCamelCase__ : List[Any] =new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        lowerCamelCase__ : int =float('''Inf''' )
        lowerCamelCase__ : Union[str, Any] =head_importance.view(-1 ).sort()[1]
        if len(__lowerCamelCase ) <= num_to_mask:
            print('''BREAK BY num_to_mask''' )
            break
        # mask heads: zero out the num_to_mask least-important entries.
        lowerCamelCase__ : List[str] =current_heads_to_mask[:num_to_mask]
        logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
        lowerCamelCase__ : Optional[int] =new_head_mask.view(-1 )
        lowerCamelCase__ : Optional[Any] =0.0
        lowerCamelCase__ : Dict =new_head_mask.view_as(__lowerCamelCase )
        lowerCamelCase__ : Tuple =new_head_mask.clone().detach()
        print_ad_tensor(__lowerCamelCase )
        # Compute metric and head importance again
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =compute_heads_importance(
            __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , head_mask=__lowerCamelCase )
        lowerCamelCase__ : Any =1 / loss
        logger.info(
            '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , __lowerCamelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
    logger.info('''Final head mask''' )
    print_ad_tensor(__lowerCamelCase )
    np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
    return head_mask
def snake_case__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] ):
    """Physically prune the masked heads and compare score/speed before vs after.

    NOTE(review): parameters are mangled duplicates (SyntaxError); the intended
    signature is (args, model, eval_dataloader, head_mask).  Pruning actually
    removes the masked weights, unlike masking which only zeroes them.
    """
    lowerCamelCase__ : str =datetime.now()
    lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] =compute_heads_importance(
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , compute_importance=__lowerCamelCase , head_mask=__lowerCamelCase )
    lowerCamelCase__ : Tuple =1 / loss
    lowerCamelCase__ : Optional[Any] =datetime.now() - before_time
    lowerCamelCase__ : int =sum(p.numel() for p in model.parameters() )
    # Per-layer indices of the heads whose mask entry is 0 (i.e. to prune).
    lowerCamelCase__ : Any ={
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowerCamelCase ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(__lowerCamelCase , __lowerCamelCase ):
            # A single pruned head comes back as a bare int; normalise to a list.
            lowerCamelCase__ : Optional[int] =[
                v,
            ]
    assert sum(len(__lowerCamelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(__lowerCamelCase )
    lowerCamelCase__ : List[str] =sum(p.numel() for p in model.parameters() )
    lowerCamelCase__ : Any =datetime.now()
    lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =compute_heads_importance(
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , compute_importance=__lowerCamelCase , head_mask=__lowerCamelCase , actually_pruned=__lowerCamelCase , )
    lowerCamelCase__ : str =1 / loss
    lowerCamelCase__ : Union[str, Any] =datetime.now() - before_time
    logger.info(
        '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , __lowerCamelCase , __lowerCamelCase , pruned_num_params / original_num_params * 100 , )
    logger.info('''Pruning: score with masking: %f score with pruning: %f''' , __lowerCamelCase , __lowerCamelCase )
    logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 )
    save_model(__lowerCamelCase , args.output_dir )
def snake_case__ ( ):
    """CLI entry point: compute head entropy/importance for a GPT-2 LM and
    optionally mask and prune attention heads.

    NOTE(review): the helpers this calls (``compute_heads_importance``,
    ``mask_heads``, ``prune_heads``) are all defined above under the mangled
    name ``snake_case__``, and assignments below target ``lowerCamelCase__``
    while later lines read ``parser``/``args``/``model`` -- as written this
    function raises NameError; reconcile the names before running.
    """
    lowerCamelCase__ : Optional[int] =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--data_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
    parser.add_argument(
        '''--model_name_or_path''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--output_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , )
    # Other parameters
    parser.add_argument(
        '''--config_name''' , default='''''' , type=__lowerCamelCase , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
    parser.add_argument(
        '''--tokenizer_name''' , default='''''' , type=__lowerCamelCase , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
    parser.add_argument(
        '''--cache_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
    parser.add_argument(
        '''--data_subset''' , type=__lowerCamelCase , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
    parser.add_argument(
        '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
    parser.add_argument(
        '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
    parser.add_argument(
        '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
    parser.add_argument(
        '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
    parser.add_argument(
        '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
    parser.add_argument(
        '''--masking_threshold''' , default=0.9 , type=__lowerCamelCase , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
    parser.add_argument(
        '''--masking_amount''' , default=0.1 , type=__lowerCamelCase , help='''Amount to heads to masking at each masking step.''' )
    parser.add_argument('''--metric_name''' , default='''acc''' , type=__lowerCamelCase , help='''Metric to use for head masking.''' )
    parser.add_argument(
        '''--max_seq_length''' , default=128 , type=__lowerCamelCase , help=(
            '''The maximum total input sequence length after WordPiece tokenization. \n'''
            '''Sequences longer than this will be truncated, sequences shorter padded.'''
        ) , )
    parser.add_argument('''--batch_size''' , default=1 , type=__lowerCamelCase , help='''Batch size.''' )
    parser.add_argument('''--seed''' , type=__lowerCamelCase , default=42 )
    parser.add_argument('''--local_rank''' , type=__lowerCamelCase , default=-1 , help='''local_rank for distributed training on gpus''' )
    parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
    parser.add_argument('''--server_ip''' , type=__lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
    parser.add_argument('''--server_port''' , type=__lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
    lowerCamelCase__ : List[Any] =parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCamelCase )
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        lowerCamelCase__ : Dict =torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
        lowerCamelCase__ : Dict =0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        lowerCamelCase__ : str =torch.device('''cuda''' , args.local_rank )
        lowerCamelCase__ : Any =1
        torch.distributed.init_process_group(backend='''nccl''' )  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    lowerCamelCase__ : Union[str, Any] =GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        lowerCamelCase__ : List[Any] =nn.parallel.DistributedDataParallel(
            __lowerCamelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowerCamelCase )
    elif args.n_gpu > 1:
        lowerCamelCase__ : int =nn.DataParallel(__lowerCamelCase )
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=__lowerCamelCase )
    torch.save(__lowerCamelCase , os.path.join(args.output_dir , '''run_args.bin''' ) )
    logger.info('''Training/evaluation parameters %s''' , __lowerCamelCase )
    # Prepare dataset
    lowerCamelCase__ : Union[str, Any] =np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.intaa ),
        ] )
    lowerCamelCase__ : Any =(torch.from_numpy(__lowerCamelCase ),)
    lowerCamelCase__ : List[Any] =TensorDataset(*__lowerCamelCase )
    lowerCamelCase__ : List[str] =RandomSampler(__lowerCamelCase )
    lowerCamelCase__ : Dict =DataLoader(__lowerCamelCase , sampler=__lowerCamelCase , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        lowerCamelCase__ : Optional[int] =mask_heads(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
        prune_heads(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
    # Bug fix: the entry point above is (mis)named ``snake_case__`` (it is the
    # last definition bound to that name), while the old guard called an
    # undefined ``main`` and crashed with NameError.
    snake_case__()
| 625 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_lowercase : List[Any] = r"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. 
Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(lowerCAmelCase_ )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    """Composite RAG configuration: wraps a question-encoder config and a generator
    config (both built via ``AutoConfig.for_model``) plus retrieval/loss settings.

    NOTE(review): this block appears machine-obfuscated and is not runnable as
    written — see the inline notes below.
    """

    # Both class attributes are assigned to the same name `_a`, so the second
    # (True, presumably `is_composition`) overwrites the first ('rag',
    # presumably `model_type`).
    _a = 'rag'
    _a = True

    # NOTE(review): every keyword parameter is named `lowerCamelCase`; duplicate
    # argument names are a SyntaxError, so this `def` cannot compile. The default
    # values match the usual RagConfig keyword list (title_sep=" / ",
    # doc_sep=" // ", n_docs=5, max_combined_length=300, ...) — TODO restore names.
    def __init__( self : List[str], lowerCamelCase : Optional[int]=None, lowerCamelCase : List[Any]=True, lowerCamelCase : Any=None, lowerCamelCase : Union[str, Any]=None, lowerCamelCase : Union[str, Any]=None, lowerCamelCase : int=None, lowerCamelCase : int=None, lowerCamelCase : Tuple=" / ", lowerCamelCase : Union[str, Any]=" // ", lowerCamelCase : List[Any]=5, lowerCamelCase : Optional[int]=300, lowerCamelCase : List[str]=768, lowerCamelCase : Optional[int]=8, lowerCamelCase : Optional[Any]="wiki_dpr", lowerCamelCase : str="train", lowerCamelCase : Dict="compressed", lowerCamelCase : Union[str, Any]=None, lowerCamelCase : Dict=None, lowerCamelCase : Optional[Any]=False, lowerCamelCase : int=False, lowerCamelCase : Any=0.0, lowerCamelCase : Tuple=True, lowerCamelCase : Tuple=False, lowerCamelCase : Optional[Any]=False, lowerCamelCase : Union[str, Any]=False, lowerCamelCase : Dict=True, lowerCamelCase : Dict=None, **lowerCamelCase : List[Any], )-> Tuple:
        super().__init__(
            bos_token_id=lowerCamelCase, pad_token_id=lowerCamelCase, eos_token_id=lowerCamelCase, decoder_start_token_id=lowerCamelCase, forced_eos_token_id=lowerCamelCase, is_encoder_decoder=lowerCamelCase, prefix=lowerCamelCase, vocab_size=lowerCamelCase, **lowerCamelCase, )
        # NOTE(review): `kwargs` is undefined here — the ** parameter above is
        # named `lowerCamelCase`, not `kwargs`.
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        # NOTE(review): each assignment below binds the throwaway local
        # `lowerCamelCase__`; the names then read (`question_encoder_config`,
        # `decoder_config`) are never bound, and no `self.*` attribute is set.
        lowerCamelCase__ : str =kwargs.pop('''question_encoder''' )
        lowerCamelCase__ : Optional[int] =question_encoder_config.pop('''model_type''' )
        lowerCamelCase__ : Tuple =kwargs.pop('''generator''' )
        lowerCamelCase__ : Optional[int] =decoder_config.pop('''model_type''' )
        # Local import avoids a circular dependency with the auto-config module.
        from ..auto.configuration_auto import AutoConfig
        lowerCamelCase__ : Optional[int] =AutoConfig.for_model(lowerCamelCase, **lowerCamelCase )
        lowerCamelCase__ : int =AutoConfig.for_model(lowerCamelCase, **lowerCamelCase )
        # NOTE(review): these were presumably `self.reduce_loss = reduce_loss`
        # etc. before obfuscation; as written they read unbound names.
        lowerCamelCase__ : Union[str, Any] =reduce_loss
        lowerCamelCase__ : Optional[Any] =label_smoothing
        lowerCamelCase__ : Optional[Any] =exclude_bos_score
        lowerCamelCase__ : Optional[Any] =do_marginalize
        lowerCamelCase__ : Optional[int] =title_sep
        lowerCamelCase__ : Optional[int] =doc_sep
        lowerCamelCase__ : Tuple =n_docs
        lowerCamelCase__ : Optional[int] =max_combined_length
        lowerCamelCase__ : Tuple =dataset
        lowerCamelCase__ : Tuple =dataset_split
        lowerCamelCase__ : Optional[int] =index_name
        lowerCamelCase__ : Optional[int] =retrieval_vector_size
        lowerCamelCase__ : str =retrieval_batch_size
        lowerCamelCase__ : Tuple =passages_path
        lowerCamelCase__ : Union[str, Any] =index_path
        lowerCamelCase__ : List[str] =use_dummy_dataset
        lowerCamelCase__ : List[str] =output_retrieved
        lowerCamelCase__ : Optional[int] =do_deduplication
        lowerCamelCase__ : Union[str, Any] =use_cache
        # Fall back to the generator's forced EOS token when none was given.
        if self.forced_eos_token_id is None:
            lowerCamelCase__ : Union[str, Any] =getattr(self.generator, '''forced_eos_token_id''', lowerCamelCase )

    # Alternate constructor from two sub-configs.
    # NOTE(review): duplicate `lowerCamelCase` parameters (SyntaxError), and the
    # body reads `question_encoder_config` / `generator_config`, which do not
    # match the obfuscated parameter names.
    @classmethod
    def snake_case ( cls : List[str], lowerCamelCase : PretrainedConfig, lowerCamelCase : PretrainedConfig, **lowerCamelCase : str )-> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **lowerCamelCase )

    # Serialize to a plain dict, expanding the two nested sub-configs.
    # NOTE(review): the locals are all bound to `lowerCamelCase__`, so `output`
    # on the return line is unbound (NameError at runtime).
    def snake_case ( self : Any )-> List[Any]:
        lowerCamelCase__ : List[str] =copy.deepcopy(self.__dict__ )
        lowerCamelCase__ : List[str] =self.question_encoder.to_dict()
        lowerCamelCase__ : str =self.generator.to_dict()
        lowerCamelCase__ : str =self.__class__.model_type
        return output
| 625 |
"""simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Tuple ):
    """Convert a T5X (JAX checkpoint) model into a Flax `FlaxAutoModelForSeqaSeqLM`
    and save it with ``save_pretrained``.

    Intended arguments (positional): T5X checkpoint path, config name, and the
    Flax output folder.

    NOTE(review): this function is machine-obfuscated and not runnable as
    written — all three parameters share the name `__lowerCamelCase` (a
    SyntaxError), and every assignment target was rewritten to the throwaway
    local `lowerCamelCase__`, so names read later (`config`, `tax_model`,
    `layer_name`, `tax_attention_key`, ...) are unbound. The structure below is
    kept verbatim for reference.
    """
    lowerCamelCase__ : Union[str, Any] =AutoConfig.from_pretrained(__lowerCamelCase )
    lowerCamelCase__ : Any =FlaxAutoModelForSeqaSeqLM.from_config(config=__lowerCamelCase )
    lowerCamelCase__ : Union[str, Any] =checkpoints.load_tax_checkpoint(__lowerCamelCase )
    # Newer checkpoints split the MLP input projection into wi_0 / wi_1.
    lowerCamelCase__ : Union[str, Any] ='''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
    # Pick the attention module name used inside the checkpoint tree.
    # NOTE(review): for model_type == "t5" the second if/elif/else chain still
    # falls through to `raise ValueError` — looks wrong; confirm against intent.
    if config.model_type == "t5":
        lowerCamelCase__ : List[str] ='''SelfAttention'''
    if config.model_type == "longt5" and config.encoder_attention_type == "local":
        lowerCamelCase__ : List[Any] ='''LocalSelfAttention'''
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        lowerCamelCase__ : Optional[Any] ='''TransientGlobalSelfAttention'''
    else:
        raise ValueError(
            '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
            ''' attribute with a value from [\'local\', \'transient-global].''' )
    # Encoder
    for layer_index in range(config.num_layers ):
        lowerCamelCase__ : List[Any] =f'''layers_{str(__lowerCamelCase )}'''
        # Self-Attention: pull the K/O/Q/V projection kernels for this layer.
        lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
        lowerCamelCase__ : Optional[int] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
        lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
        lowerCamelCase__ : List[Any] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
        # Global input layer norm (transient-global LongT5 only)
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            lowerCamelCase__ : str =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
        # Layer Normalization
        lowerCamelCase__ : List[Any] =tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
        if split_mlp_wi:
            lowerCamelCase__ : Optional[Any] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
            lowerCamelCase__ : Dict =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
        else:
            lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
        lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
        # Layer Normalization
        lowerCamelCase__ : Tuple =tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
        # Assigning: copy the extracted tensors into the Flax param tree.
        # NOTE(review): the original flax param paths (e.g. ["0"][attn]["k"]["kernel"])
        # were lost to obfuscation; each line now rebinds the same local instead.
        lowerCamelCase__ : str =flax_model.params['''encoder''']['''block'''][str(__lowerCamelCase )]['''layer''']
        lowerCamelCase__ : int =tax_attention_key
        lowerCamelCase__ : Optional[int] =tax_attention_out
        lowerCamelCase__ : List[Any] =tax_attention_query
        lowerCamelCase__ : Optional[Any] =tax_attention_value
        lowerCamelCase__ : List[str] =tax_attention_layer_norm
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            lowerCamelCase__ : Optional[int] =tax_global_layer_norm
        if split_mlp_wi:
            lowerCamelCase__ : Optional[int] =tax_mlp_wi_a
            lowerCamelCase__ : Optional[int] =tax_mlp_wi_a
        else:
            lowerCamelCase__ : Union[str, Any] =tax_mlp_wi
        lowerCamelCase__ : str =tax_mlp_wo
        lowerCamelCase__ : Optional[Any] =tax_mlp_layer_norm
        lowerCamelCase__ : Optional[int] =flax_model_encoder_layer_block
    # Only for layer 0: relative position bias is shared across layers.
    lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
    lowerCamelCase__ : str =tax_encoder_rel_embedding
    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        lowerCamelCase__ : Optional[int] =tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
        lowerCamelCase__ : Optional[int] =tax_encoder_global_rel_embedding
    # Assigning final encoder layer norm.
    lowerCamelCase__ : int =tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
    lowerCamelCase__ : List[Any] =tax_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers ):
        lowerCamelCase__ : Dict =f'''layers_{str(__lowerCamelCase )}'''
        # Self-Attention
        lowerCamelCase__ : Dict =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
        lowerCamelCase__ : List[Any] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
        lowerCamelCase__ : Optional[int] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
        lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
        # Layer Normalization
        lowerCamelCase__ : int =tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
            '''scale'''
        ]
        # Encoder-Decoder-Attention
        lowerCamelCase__ : int =tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
        lowerCamelCase__ : List[Any] =tax_enc_dec_attention_module['''key''']['''kernel''']
        lowerCamelCase__ : Any =tax_enc_dec_attention_module['''out''']['''kernel''']
        lowerCamelCase__ : Dict =tax_enc_dec_attention_module['''query''']['''kernel''']
        lowerCamelCase__ : List[str] =tax_enc_dec_attention_module['''value''']['''kernel''']
        # Layer Normalization
        lowerCamelCase__ : Dict =tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
        # MLP
        if split_mlp_wi:
            lowerCamelCase__ : str =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
            lowerCamelCase__ : Any =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
        else:
            lowerCamelCase__ : List[Any] =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
        lowerCamelCase__ : Optional[Any] =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
        # Layer Normalization
        lowerCamelCase__ : str =tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
        # Assigning into the Flax decoder block (same caveat as the encoder loop).
        lowerCamelCase__ : str =flax_model.params['''decoder''']['''block'''][str(__lowerCamelCase )]['''layer''']
        lowerCamelCase__ : Union[str, Any] =tax_attention_key
        lowerCamelCase__ : str =tax_attention_out
        lowerCamelCase__ : Optional[int] =tax_attention_query
        lowerCamelCase__ : Dict =tax_attention_value
        lowerCamelCase__ : List[str] =tax_pre_attention_layer_norm
        lowerCamelCase__ : List[Any] =tax_enc_dec_attention_key
        lowerCamelCase__ : Any =tax_enc_dec_attention_out
        lowerCamelCase__ : Any =tax_enc_dec_attention_query
        lowerCamelCase__ : Optional[int] =tax_enc_dec_attention_value
        lowerCamelCase__ : Dict =tax_cross_layer_norm
        if split_mlp_wi:
            lowerCamelCase__ : Tuple =tax_mlp_wi_a
            lowerCamelCase__ : int =tax_mlp_wi_a
        else:
            lowerCamelCase__ : List[Any] =tax_mlp_wi
        lowerCamelCase__ : Dict =tax_mlp_wo
        lowerCamelCase__ : Tuple =txa_mlp_layer_norm
        lowerCamelCase__ : Optional[Any] =flax_model_decoder_layer_block
    # Decoder Normalization
    lowerCamelCase__ : Dict =tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
    lowerCamelCase__ : int =txa_decoder_norm
    # Only for layer 0:
    lowerCamelCase__ : Tuple =tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
    lowerCamelCase__ : Tuple =tax_decoder_rel_embedding
    # Token Embeddings
    lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''token_embedder''']['''embedding''']
    lowerCamelCase__ : Dict =txa_token_embeddings
    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        lowerCamelCase__ : int =tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
    flax_model.save_pretrained(__lowerCamelCase )
    print('''T5X Model was sucessfully converted!''' )
if __name__ == "__main__":
    # CLI entry point for the T5X -> Flax conversion.
    # Fixes vs. the previous version:
    #  * the ArgumentParser / parse_args results were bound to `_lowercase`
    #    while the code read `parser` / `args` (NameError) — bind the names
    #    that are actually used;
    #  * `--t5x_checkpoint_path` is exposed by argparse as
    #    `args.t5x_checkpoint_path`, not `args.tax_checkpoint_path`;
    #  * the converter defined above is `snake_case__` — there is no
    #    `convert_tax_checkpoint_to_flax` in this module.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    snake_case__(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 625 | 1 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : str ):
    """Reverse every word longer than four characters, leaving shorter words intact.

    Words are whitespace-separated; the result is re-joined with single spaces.

    Fixes vs. the previous version:
     * the body read `sentence`, which was never defined (the parameter is
       `__lowerCamelCase`) — NameError on every call;
     * the length test used `len(__lowerCamelCase)` (the whole sentence) instead
       of `len(word)`, so the per-word threshold was never applied.

    >>> snake_case__("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1] ) if len(word ) > 4 else word for word in __lowerCamelCase.split() )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The reverser defined above is named `snake_case__`; the previous call to
    # `reverse_long_words` referenced a name that does not exist in this module.
    print(snake_case__("Hey wollef sroirraw"))
| 625 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
    """Model tester for UperNet-on-ConvNext: builds tiny configs and random
    inputs for the unit tests below.

    NOTE(review): machine-obfuscated — duplicate `lowerCamelCase` parameters
    make the `__init__` and `create_and_check` signatures SyntaxErrors, and all
    assignment targets were rewritten to the local `lowerCamelCase__`, so no
    `self.*` attribute is ever set. Kept verbatim with notes.
    """

    # NOTE(review): intended keywords are readable from the right-hand sides
    # below (parent, batch_size=13, image_size=32, num_channels=3, ...); also
    # note mutable list defaults — an anti-pattern if ever restored.
    def __init__( self : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : List[str]=13, lowerCamelCase : List[Any]=32, lowerCamelCase : Dict=3, lowerCamelCase : int=4, lowerCamelCase : str=[10, 20, 30, 40], lowerCamelCase : Any=[2, 2, 3, 2], lowerCamelCase : int=True, lowerCamelCase : int=True, lowerCamelCase : str=37, lowerCamelCase : Optional[int]="gelu", lowerCamelCase : Optional[int]=10, lowerCamelCase : Any=0.02, lowerCamelCase : Union[str, Any]=["stage2", "stage3", "stage4"], lowerCamelCase : Optional[int]=3, lowerCamelCase : Tuple=None, )-> List[str]:
        lowerCamelCase__ : List[str] =parent
        lowerCamelCase__ : Tuple =batch_size
        lowerCamelCase__ : str =image_size
        lowerCamelCase__ : Any =num_channels
        lowerCamelCase__ : Tuple =num_stages
        lowerCamelCase__ : List[str] =hidden_sizes
        lowerCamelCase__ : Any =depths
        lowerCamelCase__ : Union[str, Any] =is_training
        lowerCamelCase__ : Tuple =use_labels
        lowerCamelCase__ : int =intermediate_size
        lowerCamelCase__ : Optional[int] =hidden_act
        lowerCamelCase__ : Dict =type_sequence_label_size
        lowerCamelCase__ : Tuple =initializer_range
        lowerCamelCase__ : Any =out_features
        lowerCamelCase__ : Tuple =num_labels
        lowerCamelCase__ : Optional[int] =scope
        lowerCamelCase__ : Optional[int] =num_stages

    # Build (config, pixel_values, labels) with random tensors.
    def snake_case ( self : str )-> Optional[int]:
        lowerCamelCase__ : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase__ : Tuple =None
        if self.use_labels:
            lowerCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size], self.type_sequence_label_size )
        lowerCamelCase__ : int =self.get_config()
        # NOTE(review): `config`, `pixel_values`, `labels` are never bound above.
        return config, pixel_values, labels

    # ConvNext backbone config built from the tester's attributes.
    def snake_case ( self : Union[str, Any] )-> Any:
        return ConvNextConfig(
            num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )

    # UperNet config wrapping the backbone config.
    # NOTE(review): same method name as above — this def shadows the previous one.
    def snake_case ( self : Union[str, Any] )-> Any:
        return UperNetConfig(
            backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=lowerCamelCase, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=lowerCamelCase, loss_ignore_index=255, num_labels=self.num_labels, )

    # Run the model and check the logits shape (B, num_labels, H, W).
    # NOTE(review): duplicate `lowerCamelCase` parameters — SyntaxError.
    def snake_case ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : List[Any] )-> Tuple:
        lowerCamelCase__ : List[str] =UperNetForSemanticSegmentation(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        lowerCamelCase__ : int =model(lowerCamelCase )
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )

    # Split prepare_config_and_inputs() into (config, inputs_dict) for common tests.
    def snake_case ( self : Any )-> Tuple:
        lowerCamelCase__ : Dict =self.prepare_config_and_inputs()
        # NOTE(review): an annotated assignment cannot have a tuple target —
        # the parenthesized unpack below with `: Any` is a SyntaxError.
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) : Any =config_and_inputs
        lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    """Common-model-test suite for UperNetForSemanticSegmentation.

    NOTE(review): machine-obfuscated — all eight class attributes share the
    name `_a` (only the last `False` survives) and every test method is named
    `snake_case`, so each def shadows the previous one; unittest would collect
    none of them anyway. Kept verbatim with notes.
    """
    _a = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    _a = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
    _a = False
    _a = False
    _a = False
    _a = False
    _a = False
    _a = False

    # setUp: build the model tester and config tester.
    # NOTE(review): `lowerCamelCase` inside the body is unbound here.
    def snake_case ( self : Optional[int] )-> Optional[int]:
        lowerCamelCase__ : Optional[Any] =UperNetModelTester(self )
        lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 )

    # Exercise the standard config round-trip checks.
    def snake_case ( self : Optional[int] )-> Optional[int]:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    # Placeholder for create_and_test_config_common_properties.
    def snake_case ( self : List[str] )-> Dict:
        return

    # Check that model.forward's first argument is `pixel_values`.
    def snake_case ( self : Optional[int] )-> List[str]:
        lowerCamelCase__ , lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
            lowerCamelCase__ : Tuple =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Tuple =[*signature.parameters.keys()]
            lowerCamelCase__ : List[Any] =['''pixel_values''']
            self.assertListEqual(arg_names[:1], lowerCamelCase )

    # Run the semantic-segmentation shape check from the tester.
    def snake_case ( self : Any )-> Union[str, Any]:
        lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase )

    @unittest.skip(reason='''UperNet does not use inputs_embeds''' )
    def snake_case ( self : Optional[Any] )-> List[Any]:
        pass

    @unittest.skip(reason='''UperNet does not support input and output embeddings''' )
    def snake_case ( self : Any )-> List[str]:
        pass

    @unittest.skip(reason='''UperNet does not have a base model''' )
    def snake_case ( self : int )-> Any:
        pass

    @unittest.skip(reason='''UperNet does not have a base model''' )
    def snake_case ( self : Dict )-> str:
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def snake_case ( self : List[Any] )-> List[str]:
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def snake_case ( self : Tuple )-> str:
        pass

    # Check number and spatial shape of the hidden states.
    def snake_case ( self : Optional[int] )-> List[str]:
        # NOTE(review): duplicate `lowerCamelCase` parameters — SyntaxError.
        def check_hidden_states_output(lowerCamelCase : Dict, lowerCamelCase : int, lowerCamelCase : List[str] ):
            lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
            model.to(lowerCamelCase )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : Optional[Any] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
            lowerCamelCase__ : Optional[Any] =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowerCamelCase__ : List[str] =self.model_tester.num_stages
            self.assertEqual(len(lowerCamelCase ), expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        lowerCamelCase__ , lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Optional[int] =True
            check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__ : Optional[Any] =True
            check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )

    # With a zero-init config, every trainable parameter must be exactly 0 or 1.
    def snake_case ( self : Any )-> List[Any]:
        lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : str =_config_zero_init(lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] =_config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Optional[int] =model_class(config=lowerCamelCase )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )

    @unittest.skip(reason='''UperNet does not have tied weights''' )
    def snake_case ( self : Any )-> str:
        pass

    # Load the first published checkpoint and check it deserializes.
    @slow
    def snake_case ( self : int )-> Union[str, Any]:
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : str =UperNetForSemanticSegmentation.from_pretrained(lowerCamelCase )
            self.assertIsNotNone(lowerCamelCase )
def snake_case__ ( ):
    """Download the ADE20k validation fixture image from the Hugging Face Hub
    and return it as an RGB PIL image.

    Fixes vs. the previous version: the downloaded path was bound to a
    throwaway local while `Image.open` read the undefined name
    `__lowerCamelCase`, and `return image` referenced an unbound name — both
    NameErrors on every call.
    """
    image_path = hf_hub_download(
        repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
    image = Image.open(image_path ).convert('''RGB''' )
    return image
@require_torch
@require_vision
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration tests: run published UperNet checkpoints (Swin and
    ConvNext backbones) on the ADE20k fixture and compare a 3x3 logits patch
    against hard-coded reference values.

    NOTE(review): machine-obfuscated — both methods are named `snake_case`
    (the second shadows the first) and the locals read below (`processor`,
    `model`, `image`, `inputs`, `outputs`, `expected_shape`, `expected_slice`)
    are never bound because every assignment targets `lowerCamelCase__`.
    """

    # Swin-tiny backbone checkpoint.
    def snake_case ( self : str )-> Union[str, Any]:
        lowerCamelCase__ : List[Any] =AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
        lowerCamelCase__ : List[Any] =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(lowerCamelCase )
        lowerCamelCase__ : List[Any] =prepare_img()
        lowerCamelCase__ : List[Any] =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
        with torch.no_grad():
            lowerCamelCase__ : List[Any] =model(**lowerCamelCase )
        lowerCamelCase__ : Optional[int] =torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape, lowerCamelCase )
        lowerCamelCase__ : Dict =torch.tensor(
            [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) )

    # ConvNext-tiny backbone checkpoint (same flow as above).
    def snake_case ( self : Optional[int] )-> Optional[Any]:
        lowerCamelCase__ : str =AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
        lowerCamelCase__ : Tuple =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(lowerCamelCase )
        lowerCamelCase__ : Dict =prepare_img()
        lowerCamelCase__ : Any =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
        with torch.no_grad():
            lowerCamelCase__ : Any =model(**lowerCamelCase )
        lowerCamelCase__ : Dict =torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape, lowerCamelCase )
        lowerCamelCase__ : List[str] =torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) )
| 625 | 1 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : list ):
    """Sort a list in place with circle sort and return it.

    Each pass compares and swaps the two ends of the segment moving inward,
    then recurses on both halves; passes repeat until one completes with no
    swap, at which point the list is sorted.

    Fixes vs. the previous version: every assignment targeted the throwaway
    local `lowerCamelCase__`, so the names actually read afterwards (`left`,
    `right`, `swapped`, `mid`, `left_swap`, `right_swap`, `is_not_sorted`)
    were unbound, and the "swap" tuple assignments never exchanged any
    elements. The bindings below restore the intended algorithm.
    """
    if len(__lowerCamelCase ) < 2:
        return __lowerCamelCase

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """One circle-sort pass over collection[low..high]; True if a swap occurred."""
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        # Odd-length segment: the pointers meet on the middle element, which
        # still has to be compared with its right neighbour.
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2 )
        left_swap = circle_sort_util(collection, low, mid )
        right_swap = circle_sort_util(collection, mid + 1, high )
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(__lowerCamelCase, 0, len(__lowerCamelCase ) - 1 )
    return __lowerCamelCase
if __name__ == "__main__":
    # Fixes vs. the previous version: the input line and the parsed list were
    # bound to `_lowercase` while the code read the undefined names
    # `user_input` / `unsorted`, and `circle_sort` does not exist in this
    # module — the sorter above is `snake_case__`.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(snake_case__(unsorted))
| 625 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase_ ):
    """Placeholder object used when the `onnx` backend is not installed: any
    use routes through `requires_backends`, which raises a helpful import
    error naming the missing backend.
    """

    # Backends this dummy stands in for; read by the DummyObject metaclass —
    # presumably for classmethod/attribute interception; verify against
    # `..utils.DummyObject`.
    _a = ['onnx']

    def __init__( self : List[str], *lowerCamelCase : Union[str, Any], **lowerCamelCase : str )-> Optional[int]:
        # Raises unless the 'onnx' backend is available.
        requires_backends(self, ['''onnx'''] )

    @classmethod
    def snake_case ( cls : List[str], *lowerCamelCase : Any, **lowerCamelCase : Union[str, Any] )-> Optional[int]:
        requires_backends(cls, ['''onnx'''] )

    # NOTE(review): same name as the classmethod above — this definition
    # shadows it, so only this one remains on the class.
    @classmethod
    def snake_case ( cls : Union[str, Any], *lowerCamelCase : Tuple, **lowerCamelCase : Tuple )-> Optional[int]:
        requires_backends(cls, ['''onnx'''] )
| 625 | 1 |
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowercase : int = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
_lowercase : List[Any] = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
_lowercase : List[str] = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __SCREAMING_SNAKE_CASE(datasets.Metric):
    """ROUGE metric: wraps Google Research's `rouge_score` reimplementation.

    The original obfuscated version defined `_compute` with five parameters all
    named identically (a SyntaxError) and named both methods `snake_case`,
    shadowing the first; the `datasets.Metric` hook names `_info`/`_compute`
    and the real parameter names are restored here.
    """

    def _info(self):
        # _CITATION is expected to be defined earlier in this module —
        # NOTE(review): not visible in this chunk, confirm it exists.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        """Score each prediction against its reference.

        Returns either bootstrap-aggregated scores (use_aggregator=True) or a
        dict mapping each rouge type to the per-example score list.
        """
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 625 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the normalized escape step of c = x + y*i under z -> z**2 + c.

    Iterates the Mandelbrot recurrence at most `max_step` times and returns
    step / (max_step - 1): 1.0 means the point never diverged (treated as
    inside the set), smaller values mean it escaped earlier.

    The function was defined under an obfuscated name while `get_image`
    calls `get_distance`; the intended name is restored.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y  # uses the old `a`, hence the a_new temporary
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    """Black for points inside the Mandelbrot set (distance == 1), white otherwise.

    Restored to the name `get_image` actually calls.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    """Black inside the set; otherwise an RGB color whose hue encodes escape speed.

    Restored to the name `get_image` actually calls.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        # hue = distance with full saturation/value; scale [0, 1] floats to 0-255
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Render the Mandelbrot set into a new PIL image.

    The figure rectangle is centered at (figure_center_x, figure_center_y) with
    width `figure_width`; its height follows from the image aspect ratio.
    The obfuscated version dropped the `pixels[...] =` assignment targets, so
    no pixel was ever written — restored here.
    """
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure (result was assigned to a throwaway name
    # while `img.show()` referenced `img`; restored)
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# Module logger; the rest of this converter logs through it as `logger`.
logger = logging.get_logger(__name__)

# fairseq parameter-name fragment -> transformers SEW parameter name.
# "*" is a placeholder for the encoder layer index, filled in during conversion.
MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign `value` (tensor `full_name` from the fairseq checkpoint) to the
    module reached by walking the dotted `key` path on `hf_pointer`.

    `weight_type` selects which parameter of the resolved module is written
    ("weight", "weight_g", "weight_v", "bias", or None for the object itself).
    Raises AssertionError when shapes do not match. The obfuscated version had
    five identically named parameters (a SyntaxError) and had lost the
    `.data` assignment targets; both are restored.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every tensor of `fairseq_model` into `hf_model` using MAPPING.

    Conv feature-extractor tensors are routed through `load_conv_layer`;
    everything else is matched against MAPPING and written with
    `set_recursively`. Unmatched tensors are collected and logged.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # fine-tuned models nest the encoder under a "sew." prefix (lm_head excepted)
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # was `name.split(<model>)` in the obfuscated source; the
                        # layer index is the component just before the matched key
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one conv feature-extractor tensor into `feature_extractor`.

    The fairseq name encodes `conv_layers.<layer_id>.<type_id>...`:
    type_id 0 is the conv itself, type_id 2 is the layer norm (only layer 0
    when group norm is used). Anything else is recorded in `unused_weights`.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            # message previously indexed `feature_extractor[layer_id]`, which is
            # not subscriptable and would itself crash — fixed to .conv_layers
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    """Build a SEWConfig from a fairseq model's cfg.

    The obfuscated version had duplicate parameter names (a SyntaxError) and
    had lost every `config.<field> =` assignment target; the field names are
    restored following the transformers SEW conversion script, preserving the
    source ordering and literal values. NOTE(review): the attribute chain for
    fine-tuned models is restored as `w2v_encoder.w2v_model` (fairseq
    Wav2VecCtc layout) — confirm against the checkpoint being converted.
    """
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Convert a fairseq SEW checkpoint to a transformers model directory.

    Loads the fairseq model, derives/loads a SEWConfig, builds the processor
    (tokenizer + feature extractor for fine-tuned CTC models), copies the
    weights, and saves everything under `pytorch_dump_folder_path`.

    Restored from the obfuscated version, which had duplicate parameter names
    (a SyntaxError) and lost local assignment targets throughout.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point; `parser`/`args` were previously assigned to a
    # throwaway name while being referenced by their real names.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
| 625 |
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    """Build a VideoMAEConfig for `model_name`.

    Architecture sizes come from the size tag in the name; fine-tuned
    checkpoints additionally get num_labels plus id2label/label2id fetched
    from the huggingface/label-files dataset repo.
    """
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        # pre-training checkpoints use the [CLS]-style final norm, not mean pooling
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    """Set encoder/decoder size hyper-parameters from the size tag in `model_name`.

    "base" keeps the config defaults; "small"/"large"/"huge" override them;
    any other name raises ValueError. The obfuscated version had duplicate
    parameter names (a SyntaxError) and had lost the `config.<field>`
    assignment targets; field names restored per the HF VideoMAE converter.
    """
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    """Translate one original-checkpoint parameter name to its transformers name.

    Applies a cascade of substring substitutions (order matters: later rules
    see the already-rewritten name). The parameter was previously obfuscated
    while the body referenced `name`.
    """
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name
def convert_state_dict(orig_state_dict, config):
    """Rewrite an original VideoMAE state dict in place to transformers names.

    Fused qkv projections are split into separate query/key/value tensors
    (decoder vs. encoder layers use different widths and prefixes); all other
    keys go through `rename_key`. Restored from the obfuscated version, which
    had duplicate parameter names (a SyntaxError) and lost the
    `orig_state_dict[...] =` assignment targets.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    """Download and return the test video (list of frames) used for verification.

    The obfuscated version dropped the local holding the downloaded path, so
    `np.load` was called on an undefined name; restored.
    """
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    """Convert an original VideoMAE checkpoint (Google Drive URL) to transformers.

    Downloads the checkpoint, remaps the state dict, verifies the logits (and
    loss where applicable) against hard-coded expected values for each known
    `model_name`, then optionally saves and pushes the model. Restored from an
    obfuscated version with duplicate parameter names (a SyntaxError) and
    lost assignment targets.
    """
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    # CLI entry point; `parser`/`args` were previously assigned to a
    # throwaway name while being referenced by their real names.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 625 | 1 |
"""simple docstring"""
# Baconian-cipher tables. Previously both assigned to a throwaway name,
# while L13264 and the encode/decode functions reference `encode_dict`
# and `decode_dict` — intended names restored.
encode_dict = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}
# Inverse mapping for decoding (all code values are unique, so this is lossless).
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """Encode `word` (letters and spaces, case-insensitive) with the Bacon cipher.

    Raises Exception for any other character. Restored the intended function
    and parameter names (the body referenced `word`/`encoded`, and the
    error message itself names `encode()`).
    """
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded
def decode(coded: str) -> str:
    """Decode a Bacon-cipher string of 'A'/'B' groups separated by spaces.

    Each space-separated word is consumed five characters at a time. Raises
    Exception when `coded` contains anything other than 'A', 'B', or space.
    """
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    from doctest import testmod
    testmod()
"""simple docstring"""
# Dutch-national-flag sort categories. Previously all four were assigned to a
# throwaway name while the tuple and the sort function reference them by these
# names — restored.
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a list of 0/1/2 values in place with Dijkstra's three-way partition.

    Returns [] for an empty input and a copy for a single element; otherwise
    sorts `sequence` in place (and returns it). Raises ValueError when an
    element is not one of `colors`. Restored the parameter name and the
    in-place swap targets lost in the obfuscated version.
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            # move reds to the front
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            # move blues to the back
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'The elements inside the sequence must contains only {colors} values'
            raise ValueError(msg)
    return sequence
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # `user_input`/`unsorted` were previously assigned to a throwaway name
    # while being referenced by their real names.
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
"""simple docstring"""
from __future__ import annotations
def snake_case__ ( __lowerCamelCase : list ):
    """Return the arithmetic mean of a non-empty list of numbers.

    Raises:
        ValueError: if the list is empty (the mean would be undefined).
    """
    # NOTE(review): the original tested the undefined name `nums` instead of
    # the parameter, raising NameError on every call; fixed here.
    if not __lowerCamelCase:
        raise ValueError('''List is empty''' )
    return sum(__lowerCamelCase ) / len(__lowerCamelCase )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 625 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    """Fast (CPU-sized) pipeline tests for ``StableUnCLIPImgaImgPipeline``.

    NOTE(review): this file appears machine-mangled — the mixin base names
    ``lowerCAmelCase_`` are never defined, every class attribute is bound to
    the same name ``_a`` (only the last binding survives), locals are assigned
    to ``lowerCamelCase__`` while later lines read the original names
    (e.g. ``embedder_hidden_size``, ``feature_extractor``, ``sd_pipe``), the
    undefined global ``lowerCamelCase`` stands in for devices/flags/seeds, and
    some method signatures repeat the parameter name ``lowerCamelCase`` — a
    SyntaxError as written. Code left byte-identical; confirm against the
    upstream diffusers test before relying on it.
    """

    # Pipeline class under test plus the standard parameter sets exercised by
    # the shared tester mixins (all bound to the mangled name `_a`).
    _a = StableUnCLIPImgaImgPipeline
    _a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    _a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    _a = frozenset(
        [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    _a = frozenset([] )

    def snake_case ( self : List[str] )-> str:
        # Build the minimal component set for the pipeline: CLIP image
        # encoder, image normalizer + noising scheduler, text encoder and
        # tokenizer, UNet, denoising scheduler and VAE — all tiny
        # randomly-initialised models, each seeded for determinism.
        lowerCamelCase__ : Dict =32
        # NOTE(review): `embedder_hidden_size` / `embedder_projection_dim`
        # are read below but never bound (their assignments were renamed).
        lowerCamelCase__ : Optional[Any] =embedder_hidden_size
        # image encoding components
        lowerCamelCase__ : Dict =CLIPImageProcessor(crop_size=32, size=32 )
        torch.manual_seed(0 )
        lowerCamelCase__ : List[Any] =CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=lowerCamelCase, projection_dim=lowerCamelCase, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) )
        # regular denoising components
        torch.manual_seed(0 )
        lowerCamelCase__ : Optional[int] =StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
        lowerCamelCase__ : Dict =DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
        torch.manual_seed(0 )
        lowerCamelCase__ : Optional[int] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        torch.manual_seed(0 )
        lowerCamelCase__ : Tuple =CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=lowerCamelCase, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
        torch.manual_seed(0 )
        lowerCamelCase__ : Dict =UNetaDConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D'''), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='''projection''', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=lowerCamelCase, layers_per_block=1, upcast_attention=lowerCamelCase, use_linear_projection=lowerCamelCase, )
        torch.manual_seed(0 )
        lowerCamelCase__ : Union[str, Any] =DDIMScheduler(
            beta_schedule='''scaled_linear''', beta_start=0.00_085, beta_end=0.012, prediction_type='''v_prediction''', set_alpha_to_one=lowerCamelCase, steps_offset=1, )
        torch.manual_seed(0 )
        lowerCamelCase__ : Optional[int] =AutoencoderKL()
        # NOTE(review): the dict below reads names (`feature_extractor`,
        # `image_encoder`, ...) that the mangled assignments above never bind.
        lowerCamelCase__ : int ={
            # image encoding components
            '''feature_extractor''': feature_extractor,
            '''image_encoder''': image_encoder.eval(),
            # image noising components
            '''image_normalizer''': image_normalizer.eval(),
            '''image_noising_scheduler''': image_noising_scheduler,
            # regular denoising components
            '''tokenizer''': tokenizer,
            '''text_encoder''': text_encoder.eval(),
            '''unet''': unet.eval(),
            '''scheduler''': scheduler,
            '''vae''': vae.eval(),
        }
        return components

    def snake_case ( self : str, lowerCamelCase : Dict, lowerCamelCase : Any=0, lowerCamelCase : str=True )-> List[str]:
        # Build deterministic dummy inputs: prompt, 1x3x32x32 image (optionally
        # converted to PIL), seeded generator, 2 inference steps, numpy output.
        # NOTE(review): duplicate `lowerCamelCase` parameters — SyntaxError.
        if str(lowerCamelCase ).startswith('''mps''' ):
            lowerCamelCase__ : List[Any] =torch.manual_seed(lowerCamelCase )
        else:
            lowerCamelCase__ : Any =torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
        lowerCamelCase__ : Dict =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
        if pil_image:
            # Rescale the float tensor from [-1, 1] to [0, 1] and convert
            # to a PIL image via the pipeline helper.
            lowerCamelCase__ : int =input_image * 0.5 + 0.5
            lowerCamelCase__ : Dict =input_image.clamp(0, 1 )
            lowerCamelCase__ : List[str] =input_image.cpu().permute(0, 2, 3, 1 ).float().numpy()
            lowerCamelCase__ : Dict =DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def snake_case ( self : List[str] )-> Optional[Any]:
        # Full forward pass on CPU with `image_embeds=None`; compares a 3x3
        # corner slice of the output against hard-coded expected values.
        lowerCamelCase__ : Dict ='''cpu''' # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase__ : str =self.get_dummy_components()
        lowerCamelCase__ : int =StableUnCLIPImgaImgPipeline(**lowerCamelCase )
        lowerCamelCase__ : Any =sd_pipe.to(lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
        lowerCamelCase__ : Dict =self.get_dummy_inputs(lowerCamelCase )
        inputs.update({'''image_embeds''': None} )
        lowerCamelCase__ : Any =sd_pipe(**lowerCamelCase ).images
        lowerCamelCase__ : List[Any] =image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        lowerCamelCase__ : Union[str, Any] =np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def snake_case ( self : int )-> Tuple:
        # Attention-slicing equivalence check; exact comparison only on
        # cpu/mps where results are deterministic.
        lowerCamelCase__ : Tuple =torch_device in ['''cpu''', '''mps''']
        self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )

    def snake_case ( self : int )-> Optional[Any]:
        # Batched-vs-single inference equivalence check.
        lowerCamelCase__ : List[Any] =torch_device in ['''cpu''', '''mps''']
        self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
    def snake_case ( self : List[str] )-> List[str]:
        # xFormers attention path equivalence (CUDA + xformers only).
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """GPU integration tests for Stable unCLIP img2img: run the full released
    checkpoints against stored reference outputs and check peak GPU memory.

    NOTE(review): machine-mangled — locals are assigned to ``lowerCamelCase__``
    while later lines read the original names (``pipe``, ``output``,
    ``image`` ...), the undefined global ``lowerCamelCase`` is passed where a
    device, flag or image was intended, and all methods share the mangled name
    ``snake_case`` so later definitions shadow earlier ones. ``torch.floataa``
    presumably stands for ``torch.float16`` — confirm against upstream.
    Code left byte-identical.
    """

    def snake_case ( self : List[Any] )-> Dict:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case ( self : Optional[int] )-> int:
        # End-to-end run of the 2-1-l img2img checkpoint against a stored
        # reference array (mean pixel difference).
        lowerCamelCase__ : Tuple =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        lowerCamelCase__ : Optional[int] =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
        lowerCamelCase__ : Optional[Any] =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-l-img2img''', torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase__ : int =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase__ : Any =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' )
        lowerCamelCase__ : List[Any] =output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )

    def snake_case ( self : Optional[int] )-> Tuple:
        # Same end-to-end comparison for the 2-1-h img2img checkpoint.
        lowerCamelCase__ : Any =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        lowerCamelCase__ : str =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
        lowerCamelCase__ : Optional[int] =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase__ : str =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase__ : Tuple =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' )
        lowerCamelCase__ : Tuple =output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )

    def snake_case ( self : Optional[int] )-> List[str]:
        # Verify attention slicing + sequential CPU offload keep peak GPU
        # memory under 7 GB for a short (2-step) run.
        lowerCamelCase__ : int =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        lowerCamelCase__ : Any =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
        lowerCamelCase__ : Optional[Any] =pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase__ : List[Any] =pipe(
            lowerCamelCase, '''anime turtle''', num_inference_steps=2, output_type='''np''', )
        lowerCamelCase__ : Optional[int] =torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 625 | 1 |
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    # Build an UnCLIP image-variation pipeline by combining a pretrained
    # text-to-image UnCLIP checkpoint with a CLIP image encoder, then save
    # the assembled pipeline to `--dump_path`.
    #
    # NOTE(review): the original read undefined names (`parser`, `args`,
    # `txtaimg`, `feature_extractor`, `image_encoder`, `imgaimg`) and accessed
    # `args.txtaimg_unclip`, which argparse never creates — the option
    # `--txt2img_unclip` yields the attribute `args.txt2img_unclip`.
    # Bindings restored here.
    parser = argparse.ArgumentParser()
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )
    args = parser.parse_args()

    # Source pipeline providing the decoder, text stack and super-resolution
    # components reused by the variation pipeline.
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    # Image-conditioning components added for the image-variation task.
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    imgaimg = UnCLIPImageVariationPipeline(
        decoder=txtaimg.decoder,
        text_encoder=txtaimg.text_encoder,
        tokenizer=txtaimg.tokenizer,
        text_proj=txtaimg.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txtaimg.super_res_first,
        super_res_last=txtaimg.super_res_last,
        decoder_scheduler=txtaimg.decoder_scheduler,
        super_res_scheduler=txtaimg.super_res_scheduler,
    )
    imgaimg.save_pretrained(args.dump_path)
| 625 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : int = 4000000 ):
    """Project Euler problem 2: return the sum of the even-valued Fibonacci
    terms that do not exceed ``__lowerCamelCase`` (default four million)."""
    # NOTE(review): the original read undefined names (`a`, `b`, `n`,
    # `even_fibs`) and summed the integer limit instead of the collected
    # terms; the intended bindings are restored here.
    even_fibs = []
    a, b = 0, 1
    while b <= __lowerCamelCase:
        if b % 2 == 0:
            even_fibs.append(b )
        # Advance to the next Fibonacci pair.
        a, b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
    # NOTE(review): the original called the undefined name `solution`; the
    # solver defined above is `snake_case__`.
    print(f'{snake_case__() = }')
| 625 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
# Module logger and the canonical checkpoint-name -> config.json URL map.
# NOTE(review): both objects are bound to the same mangled name `_lowercase`,
# so the logger binding is immediately overwritten by the URL map.
_lowercase : Dict = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
    "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
    "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
    "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
    "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
    "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
    "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    """Configuration for BLOOM models: vocabulary, width/depth, dropout,
    special-token ids and tensor-parallel/debug switches.

    NOTE(review): the original declared every __init__ parameter with the
    duplicate name ``lowerCamelCase`` (a SyntaxError) and bound all three
    class attributes to ``_a``, while the body read the real names; the
    canonical names are restored from those reads (parameter defaults kept
    byte-identical, in order). The base-class name ``lowerCAmelCase_`` is
    still undefined in this file — presumably ``PretrainedConfig``; confirm
    against upstream transformers.
    """

    model_type = 'bloom'
    keys_to_ignore_at_inference = ['past_key_values']
    # Generic attribute names mapped onto BLOOM's historical names.
    attribute_map = {
        'num_hidden_layers': 'n_layer',
        'num_attention_heads': 'n_head',
    }

    def __init__(
        self,
        vocab_size=25_0880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('''n_embed''', None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        # Tensor-parallel degree used during pretraining (affects the
        # `slow_but_exact` replication path).
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    """ONNX export configuration for BLOOM (OnnxConfigWithPast-style):
    describes dynamic input axes, including past key/values, and builds
    ordered dummy inputs for tracing.

    NOTE(review): machine-mangled — the base-class name ``lowerCAmelCase_``
    is never defined, ``__init__`` and ``generate_dummy_inputs`` repeat the
    parameter name ``lowerCamelCase`` (a SyntaxError as written), locals are
    assigned to ``lowerCamelCase__`` but read under their original names
    (``common_inputs``, ``ordered_inputs`` ...), and all methods share the
    mangled name ``snake_case`` so later definitions shadow earlier ones.
    Code left byte-identical.
    """

    # Minimum torch version required for the export (mangled name `_a`).
    _a = version.parse('1.12' )

    def __init__( self : int, lowerCamelCase : PretrainedConfig, lowerCamelCase : str = "default", lowerCamelCase : List[PatchingSpec] = None, lowerCamelCase : bool = False, )-> Optional[Any]:
        super().__init__(lowerCamelCase, task=lowerCamelCase, patching_specs=lowerCamelCase, use_past=lowerCamelCase )
        # Default the pad token id to 0 when the wrapped config has none.
        if not getattr(self._config, '''pad_token_id''', lowerCamelCase ):
            # TODO: how to do that better?
            lowerCamelCase__ : Optional[Any] =0

    @property
    def snake_case ( self : int )-> Mapping[str, Mapping[int, str]]:
        # Dynamic-axis description of the model inputs; past key/values get
        # their own entries (with inverted value shape) when use_past is set.
        lowerCamelCase__ : Optional[int] =OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(lowerCamelCase, direction='''inputs''', inverted_values_shape=lowerCamelCase )
            lowerCamelCase__ : Optional[Any] ={0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            lowerCamelCase__ : Any ={0: '''batch''', 1: '''sequence'''}
        return common_inputs

    @property
    def snake_case ( self : Optional[int] )-> int:
        # Number of transformer layers, from the wrapped config.
        return self._config.n_layer

    @property
    def snake_case ( self : str )-> int:
        # Number of attention heads, from the wrapped config.
        return self._config.n_head

    @property
    def snake_case ( self : List[Any] )-> float:
        # Absolute tolerance used when validating the exported model.
        return 1E-3

    def snake_case ( self : int, lowerCamelCase : "PreTrainedTokenizer", lowerCamelCase : int = -1, lowerCamelCase : int = -1, lowerCamelCase : bool = False, lowerCamelCase : Optional["TensorType"] = None, )-> Mapping[str, Any]:
        # Build dummy inputs ordered exactly as the model's forward()
        # expects: input_ids, then (optionally) past_key_values, then
        # attention_mask extended over the past length.
        lowerCamelCase__ : str =super(lowerCamelCase, self ).generate_dummy_inputs(
            lowerCamelCase, batch_size=lowerCamelCase, seq_length=lowerCamelCase, is_pair=lowerCamelCase, framework=lowerCamelCase )
        # We need to order the input in the way they appears in the forward()
        lowerCamelCase__ : Optional[Any] =OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch

                lowerCamelCase__ , lowerCamelCase__ : int =common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                lowerCamelCase__ : List[str] =seqlen + 2
                lowerCamelCase__ : Union[str, Any] =self._config.hidden_size // self.num_attention_heads
                # Key shape is (batch*heads, head_dim, past_len); value shape
                # is (batch*heads, past_len, head_dim) — see the PR above.
                lowerCamelCase__ : Any =(
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                lowerCamelCase__ : Any =(
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                lowerCamelCase__ : Tuple =[
                    (torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(self.num_layers )
                ]
        lowerCamelCase__ : Any =common_inputs['''attention_mask''']
        if self.use_past:
            # Extend the attention mask with ones over the past positions.
            lowerCamelCase__ : Dict =ordered_inputs['''attention_mask'''].dtype
            lowerCamelCase__ : Dict =torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(lowerCamelCase, lowerCamelCase, dtype=lowerCamelCase )], dim=1 )
        return ordered_inputs

    @property
    def snake_case ( self : Any )-> int:
        # Default ONNX opset version used for the export.
        return 13
| 625 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class __SCREAMING_SNAKE_CASE :
    """Helper that builds a tiny BlenderbotSmall config plus dummy seq2seq
    inputs for the TF model tests below, and checks decoding with cached
    past_key_values against a full forward pass.

    NOTE(review): machine-mangled — every ``__init__`` parameter is named
    ``lowerCamelCase`` (duplicate parameter names are a SyntaxError) while
    the body reads the original names (``parent``, ``batch_size`` ...);
    locals are assigned to ``lowerCamelCase__`` but read under their original
    names; the three class attributes are all bound to ``_a``; and
    ``tf.inta`` presumably stands for ``tf.int8`` — confirm against the
    upstream transformers test. Code left byte-identical.
    """

    # Config class, config overrides and activation (all mangled to `_a`,
    # so only the last binding survives).
    _a = BlenderbotSmallConfig
    _a = {}
    _a = 'gelu'

    def __init__( self : Union[str, Any], lowerCamelCase : List[str], lowerCamelCase : Dict=13, lowerCamelCase : Optional[Any]=7, lowerCamelCase : Optional[int]=True, lowerCamelCase : int=False, lowerCamelCase : Union[str, Any]=99, lowerCamelCase : str=32, lowerCamelCase : List[Any]=2, lowerCamelCase : Optional[int]=4, lowerCamelCase : Union[str, Any]=37, lowerCamelCase : str=0.1, lowerCamelCase : Optional[int]=0.1, lowerCamelCase : Optional[Any]=20, lowerCamelCase : int=2, lowerCamelCase : Any=1, lowerCamelCase : Optional[Any]=0, )-> List[str]:
        # Store the test harness handle and the tiny-model hyperparameters.
        lowerCamelCase__ : Any =parent
        lowerCamelCase__ : Dict =batch_size
        lowerCamelCase__ : Optional[int] =seq_length
        lowerCamelCase__ : Tuple =is_training
        lowerCamelCase__ : Dict =use_labels
        lowerCamelCase__ : List[Any] =vocab_size
        lowerCamelCase__ : str =hidden_size
        lowerCamelCase__ : str =num_hidden_layers
        lowerCamelCase__ : Union[str, Any] =num_attention_heads
        lowerCamelCase__ : Any =intermediate_size
        lowerCamelCase__ : Dict =hidden_dropout_prob
        lowerCamelCase__ : List[Any] =attention_probs_dropout_prob
        lowerCamelCase__ : str =max_position_embeddings
        lowerCamelCase__ : Optional[int] =eos_token_id
        lowerCamelCase__ : str =pad_token_id
        lowerCamelCase__ : Union[str, Any] =bos_token_id

    def snake_case ( self : Any )-> Any:
        # Build (config, inputs_dict): random encoder ids terminated with
        # EOS, random decoder ids, and the full keyword set from
        # prepare_blenderbot_small_inputs_dict.
        lowerCamelCase__ : Any =ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
        lowerCamelCase__ : Tuple =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
        lowerCamelCase__ : Any =tf.concat([input_ids, eos_tensor], axis=1 )
        lowerCamelCase__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        lowerCamelCase__ : int =self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        lowerCamelCase__ : Optional[int] =prepare_blenderbot_small_inputs_dict(lowerCamelCase, lowerCamelCase, lowerCamelCase )
        return config, inputs_dict

    def snake_case ( self : Any, lowerCamelCase : str, lowerCamelCase : Any )-> Optional[Any]:
        # Verify that decoding with cached past_key_values produces the same
        # logits (on a random slice) as a full pass over the concatenated
        # sequence.
        # NOTE(review): duplicate `lowerCamelCase` parameters — SyntaxError.
        lowerCamelCase__ : Union[str, Any] =TFBlenderbotSmallModel(config=lowerCamelCase ).get_decoder()
        lowerCamelCase__ : List[Any] =inputs_dict['''input_ids''']

        lowerCamelCase__ : Optional[int] =input_ids[:1, :]
        lowerCamelCase__ : str =inputs_dict['''attention_mask'''][:1, :]
        lowerCamelCase__ : Union[str, Any] =inputs_dict['''head_mask''']
        lowerCamelCase__ : Optional[Any] =1

        # first forward pass
        lowerCamelCase__ : Dict =model(lowerCamelCase, attention_mask=lowerCamelCase, head_mask=lowerCamelCase, use_cache=lowerCamelCase )

        lowerCamelCase__ , lowerCamelCase__ : List[str] =outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        lowerCamelCase__ : Union[str, Any] =ids_tensor((self.batch_size, 3), config.vocab_size )
        lowerCamelCase__ : Tuple =tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )

        # append to next input_ids and
        lowerCamelCase__ : List[str] =tf.concat([input_ids, next_tokens], axis=-1 )
        lowerCamelCase__ : str =tf.concat([attention_mask, next_attn_mask], axis=-1 )

        lowerCamelCase__ : Optional[int] =model(lowerCamelCase, attention_mask=lowerCamelCase )[0]
        lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase, attention_mask=lowerCamelCase, past_key_values=lowerCamelCase )[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )

        # select random slice
        lowerCamelCase__ : Tuple =int(ids_tensor((1,), output_from_past.shape[-1] ) )
        lowerCamelCase__ : int =output_from_no_past[:, -3:, random_slice_idx]
        lowerCamelCase__ : List[str] =output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(lowerCamelCase, lowerCamelCase, rtol=1E-3 )
def snake_case__ (
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the full keyword-argument dict for a TF BlenderbotSmall forward
    pass, deriving any mask the caller did not supply.

    NOTE(review): the original declared every parameter as the duplicate
    name ``__lowerCamelCase`` (a SyntaxError) while the body read the real
    names, and used the non-existent dtype ``tf.inta`` (presumably
    ``tf.int8``); both are restored here. Parameter names and count are
    grounded in the body reads and the five ``=None`` defaults.
    """
    if attention_mask is None:
        # Attend to every non-padding token of the encoder input.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id ), tf.int8 )
    if decoder_attention_mask is None:
        # Always attend to the first (decoder-start) token, then mask padding.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.int8 ),
            ], axis=-1, )
    if head_mask is None:
        # Keep every encoder attention head active by default.
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    """Common model/pipeline test suite for TF BlenderbotSmall.

    NOTE(review): machine-mangled — the mixin base names ``lowerCAmelCase_``
    are never defined, all class attributes are bound to ``_a`` (only the
    last survives), all three methods share the mangled name ``snake_case``
    (later defs shadow earlier ones), and the setup method reads the
    undefined names ``TFBlenderbotSmallModelTester`` and ``lowerCamelCase``.
    Code left byte-identical.
    """

    # Model classes, generative classes and pipeline mapping under test
    # (all mangled to `_a`).
    _a = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    _a = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    _a = (
        {
            'conversational': TFBlenderbotSmallForConditionalGeneration,
            'feature-extraction': TFBlenderbotSmallModel,
            'summarization': TFBlenderbotSmallForConditionalGeneration,
            'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
            'translation': TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    # Test-behavior flags (mangled to `_a`).
    _a = True
    _a = False
    _a = False

    def snake_case ( self : Any )-> str:
        # Create the model tester and the shared config tester.
        lowerCamelCase__ : Tuple =TFBlenderbotSmallModelTester(self )
        lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase )

    def snake_case ( self : Any )-> Optional[int]:
        # Run the standard config round-trip tests.
        self.config_tester.run_common_tests()

    def snake_case ( self : int )-> str:
        # Exercise the cached-decoding equivalence check from the tester.
        lowerCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase )
@require_tokenizers
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow generation integration test for the 90M BlenderbotSmall
    checkpoint: generate a reply to a fixed prompt and accept any of the
    known-good outputs.

    NOTE(review): machine-mangled — both class attributes are bound to
    ``_a`` (so ``self.src_text`` / ``self.model_name`` read below are never
    set), both cached properties share the name ``snake_case``, and locals
    are assigned to ``lowerCamelCase__`` while read as ``model`` /
    ``generated_words``. Code left byte-identical.
    """

    # Source prompt and checkpoint name (both mangled to `_a`).
    _a = [
        'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
        ' i\'m going to throw up.\nand why is that?'
    ]
    _a = 'facebook/blenderbot_small-90M'

    @cached_property
    def snake_case ( self : Any )-> List[Any]:
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )

    @cached_property
    def snake_case ( self : int )-> List[Any]:
        # Lazily load the seq2seq model for the checkpoint under test.
        lowerCamelCase__ : str =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    @slow
    def snake_case ( self : Tuple )-> int:
        # Generate with beam search + cache and compare the decoded text
        # against the set of acceptable replies.
        lowerCamelCase__ : Dict =self.tokenizer(self.src_text, return_tensors='''tf''' )
        lowerCamelCase__ : Any =self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=lowerCamelCase, )
        lowerCamelCase__ : Any =self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=lowerCamelCase )[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 625 | 1 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : int ):
    """Return the two's-complement binary representation (as an '0b…' string)
    of a non-positive integer.

    For a negative input the result uses the minimal width: the bit length of
    the magnitude plus a leading sign bit. Zero yields '0b0'.

    Raises:
        ValueError: if the input is positive.
    """
    # NOTE(review): the original read undefined names (`number`,
    # `binary_number_length`, `twos_complement_number`); bindings restored.
    if __lowerCamelCase > 0:
        raise ValueError('''input must be a negative integer''' )
    # Bit length of |n| (bin() of a negative int starts with '-0b').
    binary_number_length = len(bin(__lowerCamelCase )[3:] )
    # |n| - 2**k is negative, so slicing off '-0b' leaves its magnitude bits.
    twos_complement_number = bin(abs(__lowerCamelCase ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            '''1'''
            + '''0''' * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if __lowerCamelCase < 0
        else '''0'''
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 625 |
"""simple docstring"""
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """Return True if ``next_ver`` can extend the Hamiltonian path at
    position ``curr_ind``: it must be adjacent to the previous vertex and not
    already appear in ``path``.

    NOTE(review): the original declared four parameters all named
    ``__lowerCamelCase`` (a SyntaxError) while the body and the call site in
    util_hamilton_cycle used the names restored here.
    """
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path )
def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Backtracking helper: try every vertex at position ``curr_ind`` of
    ``path`` and recurse; return True when a Hamiltonian cycle is completed.

    Mutates ``path`` in place, resetting tried slots to -1 on backtrack.

    NOTE(review): the original declared three parameters all named
    ``__lowerCamelCase`` (a SyntaxError) while the body and the recursive
    call used the names restored here.
    """
    # Base Case
    if curr_ind == len(graph ):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph ) ):
        if valid_connection(graph, next_ver, curr_ind, path ):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1 ):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def snake_case__ (graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Search for a Hamiltonian cycle in ``graph`` (adjacency matrix),
    starting and ending at ``start_index``.

    Returns the vertex path of length ``len(graph) + 1`` (first == last ==
    ``start_index``) if a cycle exists, otherwise an empty list.

    NOTE(review): the original declared both parameters as the duplicate name
    ``__lowerCamelCase`` (a SyntaxError) and lost the ``path[0] = path[-1]``
    subscripts when the assignment was mangled; both are restored here.
    """
    # One slot per vertex plus the repeated start vertex; -1 marks "unset".
    path = [-1] * (len(graph ) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1 ) else []
| 625 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config( model_name ):
    """Map a FocalNet checkpoint name (e.g. 'focalnet-tiny-lrf',
    'focalnet-large-lrf-fl3') to a ``FocalNetConfig`` with matching depths,
    focal levels/windows, embedding width and ImageNet label maps.

    NOTE(review): the original read undefined names (`model_name`, `depths`,
    `idalabel` ...) because every assignment target was mangled, passed the
    single parameter for every FocalNetConfig kwarg, and used `int(k)`'s
    mangled form; bindings are restored here from the surviving reads. The
    kwargs `idalabel`/`labelaid` are restored to `id2label`/`label2id`
    (digit-to-'a' mangling) — this function is also renamed to match the
    call site in the conversion routine below.
    """
    # Depth pattern: tiny uses 4 stages of (2,2,6,2); everything else (2,2,18,2).
    depths = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
    # Large/huge variants use conv patch embedding, post-layernorm and layerscale.
    use_conv_embed = True if '''large''' in model_name or '''huge''' in model_name else False
    use_post_layernorm = True if '''large''' in model_name or '''huge''' in model_name else False
    use_layerscale = True if '''large''' in model_name or '''huge''' in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    # Embedding width scales with model size.
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = '''huggingface/label-files'''
    if "large" in model_name or "huge" in model_name:
        filename = '''imagenet-22k-id2label.json'''
    else:
        filename = '''imagenet-1k-id2label.json'''

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim, depths=depths, focal_levels=focal_levels, focal_windows=focal_windows, use_conv_embed=use_conv_embed, id2label=id2label, label2id=label2id, use_post_layernorm=use_post_layernorm, use_layerscale=use_layerscale, )

    return config
def snake_case__ ( name: str ):
    """Translate an original FocalNet checkpoint parameter key into the
    Hugging Face Transformers naming scheme.

    Replacements are applied in a fixed order (the 'layers' prefix must be
    added before the 'encoder.layers' -> 'encoder.stages' rewrite). Every key
    except the classifier head is finally prefixed with 'focalnet.'.

    NOTE(review): the original read the undefined name ``name`` (the
    parameter and all assignment targets were mangled); bindings restored.
    """
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''', '''embeddings.norm''' )
    if "layers" in name:
        name = '''encoder.''' + name
    if "encoder.layers" in name:
        name = name.replace('''encoder.layers''', '''encoder.stages''' )
    if "downsample.proj" in name:
        name = name.replace('''downsample.proj''', '''downsample.projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''', '''layers''' )
    # Modulation sub-layers: f/h/proj map to projection_in/context/out.
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('''modulation.f''', '''modulation.projection_in''' )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('''modulation.h''', '''modulation.projection_context''' )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('''modulation.proj''', '''modulation.projection_out''' )
    if name == "norm.weight":
        name = '''layernorm.weight'''
    if name == "norm.bias":
        name = '''layernorm.bias'''
    if "head" in name:
        name = name.replace('''head''', '''classifier''' )
    else:
        # Everything except the classifier lives under the backbone prefix.
        name = '''focalnet.''' + name
    return name
def snake_case__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any]=False ):
    """Download an original FocalNet checkpoint, rename its weights, load them into
    a HuggingFace ``FocalNetForImageClassification``, verify preprocessing and the
    first logits against per-model reference values, then optionally save/push.

    NOTE(review): obfuscation damage — the three parameters share one name (a
    SyntaxError in Python), and the body reads ``model_name_to_url``,
    ``model_name``, ``state_dict``, ``val``, ``model``, ``processor``, ``inputs``,
    ``image_transforms``, ``outputs``, ``predicted_class_idx``,
    ``pytorch_dump_folder_path`` and ``push_to_hub`` which are never bound under
    those names (results go to throwaway ``lowerCamelCase__`` locals). Presumably
    the signature was ``(model_name, pytorch_dump_folder_path, push_to_hub=False)``
    — verify against the upstream conversion script before running.
    """
    # fmt: off
    # Supported model names -> original checkpoint URLs.
    lowerCamelCase__ : Dict ={
        '''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
        '''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
        '''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
        '''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
        '''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
        '''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
        '''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
        '''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
        '''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
        '''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
    }
    # fmt: on
    lowerCamelCase__ : Optional[Any] =model_name_to_url[model_name]
    print('''Checkpoint URL: ''' , __lowerCamelCase )
    lowerCamelCase__ : List[str] =torch.hub.load_state_dict_from_url(__lowerCamelCase , map_location='''cpu''' )['''model''']
    # rename keys
    for key in state_dict.copy().keys():
        lowerCamelCase__ : List[Any] =state_dict.pop(__lowerCamelCase )
        lowerCamelCase__ : Any =val
    lowerCamelCase__ : Tuple =get_focalnet_config(__lowerCamelCase )
    lowerCamelCase__ : Optional[Any] =FocalNetForImageClassification(__lowerCamelCase )
    model.eval()
    # load state dict
    model.load_state_dict(__lowerCamelCase )
    # verify conversion
    lowerCamelCase__ : Any ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
    lowerCamelCase__ : Dict =BitImageProcessor(
        do_resize=__lowerCamelCase , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=__lowerCamelCase , crop_size=224 , do_normalize=__lowerCamelCase , image_mean=__lowerCamelCase , image_std=__lowerCamelCase , )
    lowerCamelCase__ : Any =Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
    lowerCamelCase__ : Optional[int] =processor(images=__lowerCamelCase , return_tensors='''pt''' )
    # Reference torchvision pipeline used to double-check the processor output.
    lowerCamelCase__ : List[str] =transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
        ] )
    lowerCamelCase__ : Dict =image_transforms(__lowerCamelCase ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , __lowerCamelCase , atol=1e-4 )
    lowerCamelCase__ : Any =model(**__lowerCamelCase )
    lowerCamelCase__ : List[str] =outputs.logits.argmax(-1 ).item()
    print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
    print('''First values of logits:''' , outputs.logits[0, :3] )
    # Per-model expected first-three logits, used as a conversion sanity check.
    if model_name == "focalnet-tiny":
        lowerCamelCase__ : Tuple =torch.tensor([0.21_66, -0.43_68, 0.21_91] )
    elif model_name == "focalnet-tiny-lrf":
        lowerCamelCase__ : Union[str, Any] =torch.tensor([1.16_69, 0.01_25, -0.16_95] )
    elif model_name == "focalnet-small":
        lowerCamelCase__ : Optional[Any] =torch.tensor([0.49_17, -0.04_30, 0.13_41] )
    elif model_name == "focalnet-small-lrf":
        lowerCamelCase__ : Any =torch.tensor([-0.25_88, -0.53_42, -0.23_31] )
    elif model_name == "focalnet-base":
        lowerCamelCase__ : Any =torch.tensor([-0.16_55, -0.40_90, -0.17_30] )
    elif model_name == "focalnet-base-lrf":
        lowerCamelCase__ : Tuple =torch.tensor([0.53_06, -0.04_83, -0.39_28] )
    assert torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(__lowerCamelCase )
        processor.save_pretrained(__lowerCamelCase )
    if push_to_hub:
        print(f'''Pushing model and processor of {model_name} to the hub...''' )
        model.push_to_hub(f'''{model_name}''' )
        processor.push_to_hub(f'''{model_name}''' )
if __name__ == "__main__":
    # CLI entry point for the FocalNet conversion script.
    _lowercase : str = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )
    _lowercase : Any = parser.parse_args()
    # NOTE(review): `parser`, `args` and `convert_focalnet_checkpoint` are read but
    # never bound under these names (collapsed to `_lowercase` / `snake_case__` by
    # obfuscation) — this guard raises NameError as written; verify before running.
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 625 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# Path of the shared SentencePiece fixture used to build the test tokenizer.
_lowercase : List[str] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right
# fairseq language-code token ids for en_XX (250004) and ro_RO (250020).
# NOTE(review): the test classes below read these as EN_CODE / RO_CODE, but
# obfuscation collapsed both names to `_lowercase` — verify before running.
_lowercase : List[str] = 2_5_0_0_0_4
_lowercase : Optional[Any] = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    '''Unit tests for the slow and fast MBart tokenizers, driven by the shared
    TokenizerTesterMixin and the SentencePiece test fixture.

    NOTE(review): obfuscation damage — the four ``_a`` attributes collapse what
    were originally distinct mixin settings (slow class, fast class, and two
    boolean flags), and many calls below read a bare ``lowerCamelCase`` or
    ``tokenizer`` name that is never bound in scope (originally real arguments
    such as the fixture path or ``True``). Verify against the upstream test
    module before running.
    '''

    _a = MBartTokenizer
    _a = MBartTokenizerFast
    _a = True
    _a = True

    def snake_case ( self : Tuple )-> Union[str, Any]:
        """Build a tokenizer from the SentencePiece fixture and persist it for the mixin."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCamelCase__ : Union[str, Any] =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )
        tokenizer.save_pretrained(self.tmpdirname )

    def snake_case ( self : Dict )-> Union[str, Any]:
        """Check tokenize / tokens->ids / ids->tokens round trips on fixed samples."""
        lowerCamelCase__ : Any =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )
        lowerCamelCase__ : List[Any] =tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(lowerCamelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowerCamelCase ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        lowerCamelCase__ : str =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            lowerCamelCase, [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ], )
        lowerCamelCase__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(lowerCamelCase )
        self.assertListEqual(
            lowerCamelCase, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ], )
        lowerCamelCase__ : str =tokenizer.convert_ids_to_tokens(lowerCamelCase )
        self.assertListEqual(
            lowerCamelCase, [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ], )

    def snake_case ( self : Tuple )-> List[Any]:
        """Save/reload parity between fast and slow tokenizers, in all three save formats."""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        lowerCamelCase__ : int =(self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCamelCase__ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
                lowerCamelCase__ : str =self.tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
                lowerCamelCase__ : List[str] =tempfile.mkdtemp()
                lowerCamelCase__ : Union[str, Any] =tokenizer_r.save_pretrained(lowerCamelCase )
                lowerCamelCase__ : Optional[int] =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                lowerCamelCase__ : List[str] =tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Any =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : Dict =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(lowerCamelCase )
                # Save tokenizer rust, legacy_format=True
                lowerCamelCase__ : Dict =tempfile.mkdtemp()
                lowerCamelCase__ : List[str] =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
                lowerCamelCase__ : Tuple =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it save with the same files
                self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Optional[int] =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : Any =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                shutil.rmtree(lowerCamelCase )
                # Save tokenizer rust, legacy_format=False
                lowerCamelCase__ : Optional[int] =tempfile.mkdtemp()
                lowerCamelCase__ : int =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
                lowerCamelCase__ : Dict =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Dict =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : int =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                shutil.rmtree(lowerCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Integration tests for the pretrained ``facebook/mbart-large-en-ro`` tokenizer.

    NOTE(review): obfuscation damage — the repeated ``_a`` attributes collapse
    what were originally ``checkpoint_name`` / ``src_text`` / ``tgt_text`` /
    ``expected_src_tokens``; ``EN_CODE`` / ``RO_CODE`` are no longer defined at
    module level; and several methods read a bare ``lowerCamelCase`` name that
    is never bound. Verify against the upstream test module before running.
    '''

    _a = 'facebook/mbart-large-en-ro'
    _a = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    _a = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    _a = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]

    @classmethod
    def snake_case ( cls : List[Any] )-> Optional[int]:
        """Load the pretrained tokenizer once for the whole test class."""
        lowerCamelCase__ : MBartTokenizer =MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='''en_XX''', tgt_lang='''ro_RO''' )
        lowerCamelCase__ : Optional[int] =1
        return cls

    def snake_case ( self : Optional[Any] )-> List[str]:
        """Language-code tokens map to their fixed fairseq ids."""
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''], 25_0001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''], 25_0004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''], 25_0020 )

    def snake_case ( self : Optional[int] )-> List[Any]:
        """Encoding the source text reproduces the expected token ids."""
        lowerCamelCase__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, lowerCamelCase )

    def snake_case ( self : Optional[Any] )-> str:
        """Decoding with skip_special_tokens drops the leading language code and EOS."""
        self.assertIn(lowerCamelCase, self.tokenizer.all_special_ids )
        lowerCamelCase__ : Optional[int] =[RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
        lowerCamelCase__ : Any =self.tokenizer.decode(lowerCamelCase, skip_special_tokens=lowerCamelCase )
        lowerCamelCase__ : str =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCamelCase )
        self.assertEqual(lowerCamelCase, lowerCamelCase )
        self.assertNotIn(self.tokenizer.eos_token, lowerCamelCase )

    def snake_case ( self : Tuple )-> int:
        """Truncation keeps EOS (id 2) plus the language code as the final two ids."""
        lowerCamelCase__ : Optional[int] =['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0], lowerCamelCase )
        lowerCamelCase__ : Dict =10
        lowerCamelCase__ : Optional[int] =self.tokenizer(lowerCamelCase, max_length=lowerCamelCase, truncation=lowerCamelCase ).input_ids[0]
        self.assertEqual(ids[-2], 2 )
        self.assertEqual(ids[-1], lowerCamelCase )
        self.assertEqual(len(lowerCamelCase ), lowerCamelCase )

    def snake_case ( self : int )-> Any:
        """Mask token and ar_AR code resolve to their reserved ids."""
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ), [25_0026, 25_0001] )

    def snake_case ( self : Tuple )-> Optional[Any]:
        """fairseq token/id tables survive a save/reload round trip."""
        lowerCamelCase__ : int =tempfile.mkdtemp()
        lowerCamelCase__ : Optional[int] =self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =MBartTokenizer.from_pretrained(lowerCamelCase )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowerCamelCase )

    @require_torch
    def snake_case ( self : Optional[Any] )-> Tuple:
        """Batch layout matches fairseq: EOS+EN_CODE on inputs, RO_CODE first on decoder."""
        lowerCamelCase__ : Optional[Any] =self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, return_tensors='''pt''' )
        lowerCamelCase__ : Dict =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def snake_case ( self : Optional[Any] )-> Any:
        """Full encode shapes plus the reset of prefix/suffix special tokens."""
        lowerCamelCase__ : str =self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=len(self.expected_src_tokens ), return_tensors='''pt''', )
        lowerCamelCase__ : List[Any] =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )
        self.assertIsInstance(lowerCamelCase, lowerCamelCase )
        self.assertEqual((2, 14), batch.input_ids.shape )
        self.assertEqual((2, 14), batch.attention_mask.shape )
        lowerCamelCase__ : Any =batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, lowerCamelCase )
        self.assertEqual(2, batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [] )
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE] )

    def snake_case ( self : List[Any] )-> Dict:
        """Source and target truncation lengths are applied independently."""
        lowerCamelCase__ : Any =self.tokenizer(self.src_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=3, return_tensors='''pt''' )
        lowerCamelCase__ : Tuple =self.tokenizer(
            text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=10, return_tensors='''pt''' )
        lowerCamelCase__ : Union[str, Any] =targets['''input_ids''']
        lowerCamelCase__ : List[Any] =shift_tokens_right(lowerCamelCase, self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1], 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1], 10 )

    @require_torch
    def snake_case ( self : Optional[int] )-> List[Any]:
        """Translation inputs carry the forced BOS id of the target language."""
        lowerCamelCase__ : str =self.tokenizer._build_translation_inputs(
            '''A test''', return_tensors='''pt''', src_lang='''en_XX''', tgt_lang='''ar_AR''' )
        self.assertEqual(
            nested_simplify(lowerCamelCase ), {
                # A, test, EOS, en_XX
                '''input_ids''': [[62, 3034, 2, 25_0004]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 25_0001,
            }, )
| 625 | 1 |
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def snake_case__ ( model_name : str , num_frames : int ):
    """Build an ``XCLIPConfig`` for *model_name* with *num_frames* input frames.

    The patch size is parsed out of the model name (e.g. "xclip-base-patch32"
    -> 32). "large" variants get enlarged text/vision dimensions and a 768-d
    projection; the 336px variant additionally enlarges the image size.

    Fixes obfuscation damage: both parameters previously shared one name (a
    SyntaxError), the body read an undefined ``model_name``, and all "large"
    overrides were assigned to throwaway locals instead of config attributes
    (reconstructed here — verify against the upstream conversion script).
    """
    text_config = XCLIPTextConfig()
    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)
    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072
    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336
    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)
    if "large" in model_name:
        config.projection_dim = 768
    return config
def snake_case__ ( __lowerCamelCase : str ):
    """Map one key of an original X-CLIP checkpoint to its HuggingFace name.

    Fixes obfuscation damage: the old body read and returned an undefined
    local ``name`` and discarded every ``str.replace`` result. Each
    substitution is now applied cumulatively to the input parameter.
    """
    name = __lowerCamelCase
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def snake_case__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Any ):
    """Rewrite an original X-CLIP state dict into HuggingFace naming, splitting the
    fused ``attn.in_proj`` weight/bias of each attention block into separate
    q/k/v projections sized from the relevant config section (vision, MIT, text).

    NOTE(review): obfuscation damage — the two parameters share one name (a
    SyntaxError) and the body reads ``orig_state_dict``, ``key``, ``val``,
    ``key_split``, ``config``, ``dim`` and ``new_key_name`` that are never bound
    under those names; worse, the q/k/v slices are assigned to throwaway
    ``lowerCamelCase__`` locals, so the original per-key destination names were
    lost. Reconstruct from the upstream conversion script before running.
    """
    for key in orig_state_dict.copy().keys():
        lowerCamelCase__ : Dict =orig_state_dict.pop(__lowerCamelCase )
        if "attn.in_proj" in key:
            lowerCamelCase__ : Union[str, Any] =key.split('''.''' )
            if key.startswith('''visual''' ):
                lowerCamelCase__ : Optional[int] =key_split[3]
                lowerCamelCase__ : Tuple =config.vision_config.hidden_size
                if "message_attn" in key:
                    # q / k / v slices: rows [:dim], [dim:2*dim], [-dim:]
                    if "weight" in key:
                        lowerCamelCase__ : Union[str, Any] =val[
                            :dim, :
                        ]
                        lowerCamelCase__ : Any =val[
                            dim : dim * 2, :
                        ]
                        lowerCamelCase__ : List[str] =val[
                            -dim:, :
                        ]
                    else:
                        lowerCamelCase__ : Optional[Any] =val[
                            :dim
                        ]
                        lowerCamelCase__ : int =val[
                            dim : dim * 2
                        ]
                        lowerCamelCase__ : Optional[int] =val[
                            -dim:
                        ]
                else:
                    if "weight" in key:
                        lowerCamelCase__ : Tuple =val[
                            :dim, :
                        ]
                        lowerCamelCase__ : Optional[int] =val[
                            dim : dim * 2, :
                        ]
                        lowerCamelCase__ : str =val[
                            -dim:, :
                        ]
                    else:
                        lowerCamelCase__ : Union[str, Any] =val[:dim]
                        lowerCamelCase__ : int =val[
                            dim : dim * 2
                        ]
                        lowerCamelCase__ : str =val[-dim:]
            elif key.startswith('''mit''' ):
                # multi-frame integration transformer uses its own hidden size
                lowerCamelCase__ : int =key_split[2]
                lowerCamelCase__ : List[str] =config.vision_config.mit_hidden_size
                if "weight" in key:
                    lowerCamelCase__ : Any =val[:dim, :]
                    lowerCamelCase__ : List[str] =val[dim : dim * 2, :]
                    lowerCamelCase__ : Optional[int] =val[-dim:, :]
                else:
                    lowerCamelCase__ : Optional[Any] =val[:dim]
                    lowerCamelCase__ : Optional[int] =val[dim : dim * 2]
                    lowerCamelCase__ : str =val[-dim:]
            else:
                # text encoder attention blocks
                lowerCamelCase__ : Any =key_split[2]
                lowerCamelCase__ : Optional[int] =config.text_config.hidden_size
                if "weight" in key:
                    lowerCamelCase__ : Union[str, Any] =val[:dim, :]
                    lowerCamelCase__ : Tuple =val[
                        dim : dim * 2, :
                    ]
                    lowerCamelCase__ : List[str] =val[-dim:, :]
                else:
                    lowerCamelCase__ : Optional[Any] =val[:dim]
                    lowerCamelCase__ : Union[str, Any] =val[
                        dim : dim * 2
                    ]
                    lowerCamelCase__ : Optional[Any] =val[-dim:]
        else:
            # plain keys are renamed; projection matrices additionally transposed
            lowerCamelCase__ : Optional[Any] =rename_key(__lowerCamelCase )
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                lowerCamelCase__ : str =val.T
            lowerCamelCase__ : int =val
    return orig_state_dict
def snake_case__ ( __lowerCamelCase : int ):
    """Download the "eating spaghetti" fixture clip with the requested number of
    frames (8, 16 or 32) and return it as a list of frame arrays.

    Fixes obfuscation damage: the body previously read an undefined
    ``num_frames`` name, and an unsupported frame count fell through with the
    filename unbound (UnboundLocalError) — that case now raises ValueError.
    """
    num_frames = __lowerCamelCase
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    else:
        raise ValueError(f"Unsupported number of frames: {num_frames}; expected 8, 16 or 32.")
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Tuple=False ):
    """Download an original X-CLIP checkpoint, convert its state dict, verify the
    video/text probabilities on a fixture clip against per-model references,
    then optionally save and/or push the converted model.

    NOTE(review): obfuscation damage — the three parameters share one name (a
    SyntaxError) and the body reads ``model_to_url``, ``model_name``,
    ``checkpoint_url``, ``model``, ``missing_keys``, ``inputs``, ``outputs``,
    ``logits_per_video``, ``pytorch_dump_folder_path``, ``push_to_hub``,
    ``processor`` and ``slow_tokenizer`` which are never bound under those
    names (results go to throwaway ``lowerCamelCase__`` locals). Presumably the
    signature was ``(model_name, pytorch_dump_folder_path=None,
    push_to_hub=False)``. Note also that ``XCLIPModel(...)`` appears to be
    constructed twice. Verify against the upstream conversion script.
    """
    lowerCamelCase__ : List[Any] ={
        # fully supervised kinetics-400 checkpoints
        '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
        '''xclip-base-patch32-16-frames''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
        ),
        '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
        '''xclip-base-patch16-16-frames''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
        ),
        '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
        '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
        # fully supervised kinetics-600 checkpoints
        '''xclip-base-patch16-kinetics-600''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
        ),
        '''xclip-base-patch16-kinetics-600-16-frames''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
        ),
        '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
        # few shot
        '''xclip-base-patch16-hmdb-2-shot''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
        ),
        '''xclip-base-patch16-hmdb-4-shot''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
        ),
        '''xclip-base-patch16-hmdb-8-shot''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
        ),
        '''xclip-base-patch16-hmdb-16-shot''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
        ),
        '''xclip-base-patch16-ucf-2-shot''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
        ),
        '''xclip-base-patch16-ucf-4-shot''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
        ),
        '''xclip-base-patch16-ucf-8-shot''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
        ),
        '''xclip-base-patch16-ucf-16-shot''': (
            '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
        ),
        # zero shot
        '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
    }
    lowerCamelCase__ : Union[str, Any] =model_to_url[model_name]
    # frame count: default 8; "16-frames" checkpoints use 16, few-shot ones 32
    lowerCamelCase__ : str =8
    if "16-frames" in model_name:
        lowerCamelCase__ : Tuple =16
    elif "shot" in model_name:
        lowerCamelCase__ : List[Any] =32
    lowerCamelCase__ : str =get_xclip_config(__lowerCamelCase , __lowerCamelCase )
    lowerCamelCase__ : int =XCLIPModel(__lowerCamelCase )
    model.eval()
    # Google Drive URLs need gdown; others load straight via torch.hub.
    if "drive" in checkpoint_url:
        lowerCamelCase__ : Optional[int] ='''pytorch_model.bin'''
        gdown.cached_download(__lowerCamelCase , __lowerCamelCase , quiet=__lowerCamelCase )
        lowerCamelCase__ : str =torch.load(__lowerCamelCase , map_location='''cpu''' )['''model''']
    else:
        lowerCamelCase__ : Optional[Any] =torch.hub.load_state_dict_from_url(__lowerCamelCase )['''model''']
    lowerCamelCase__ : Optional[Any] =convert_state_dict(__lowerCamelCase , __lowerCamelCase )
    lowerCamelCase__ : Optional[Any] =XCLIPModel(__lowerCamelCase )
    lowerCamelCase__ , lowerCamelCase__ : Dict =model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
    # only the (buffer) position_ids are expected to be missing
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()
    lowerCamelCase__ : Tuple =336 if model_name == '''xclip-large-patch14-16-frames''' else 224
    lowerCamelCase__ : Optional[int] =VideoMAEImageProcessor(size=__lowerCamelCase )
    lowerCamelCase__ : List[str] =CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' )
    lowerCamelCase__ : str =CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' )
    lowerCamelCase__ : Any =XCLIPProcessor(image_processor=__lowerCamelCase , tokenizer=__lowerCamelCase )
    lowerCamelCase__ : Tuple =prepare_video(__lowerCamelCase )
    lowerCamelCase__ : Dict =processor(
        text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=__lowerCamelCase , return_tensors='''pt''' , padding=__lowerCamelCase )
    print('''Shape of pixel values:''' , inputs.pixel_values.shape )
    with torch.no_grad():
        lowerCamelCase__ : int =model(**__lowerCamelCase )
    # Verify outputs
    lowerCamelCase__ : str =outputs.logits_per_video
    lowerCamelCase__ : Dict =logits_per_video.softmax(dim=1 )
    print('''Probs:''' , __lowerCamelCase )
    # Per-model expected probabilities for the three candidate captions.
    # kinetics-400
    if model_name == "xclip-base-patch32":
        lowerCamelCase__ : Optional[int] =torch.tensor([[0.00_19, 0.99_51, 0.00_30]] )
    elif model_name == "xclip-base-patch32-16-frames":
        lowerCamelCase__ : Any =torch.tensor([[7.0_9_9_9e-0_4, 9.9_8_8_3e-0_1, 4.5_5_8_0e-0_4]] )
    elif model_name == "xclip-base-patch16":
        lowerCamelCase__ : Tuple =torch.tensor([[0.00_83, 0.96_81, 0.02_36]] )
    elif model_name == "xclip-base-patch16-16-frames":
        lowerCamelCase__ : List[str] =torch.tensor([[7.6_9_3_7e-0_4, 9.9_7_2_8e-0_1, 1.9_4_7_3e-0_3]] )
    elif model_name == "xclip-large-patch14":
        lowerCamelCase__ : str =torch.tensor([[0.00_62, 0.98_64, 0.00_75]] )
    elif model_name == "xclip-large-patch14-16-frames":
        lowerCamelCase__ : int =torch.tensor([[3.3_8_7_7e-0_4, 9.9_9_3_7e-0_1, 2.8_8_8_8e-0_4]] )
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        lowerCamelCase__ : List[str] =torch.tensor([[0.05_55, 0.89_14, 0.05_31]] )
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        lowerCamelCase__ : Any =torch.tensor([[3.8_5_5_4e-0_4, 9.9_9_2_9e-0_1, 3.2_7_5_4e-0_4]] )
    elif model_name == "xclip-large-patch14-kinetics-600":
        lowerCamelCase__ : List[Any] =torch.tensor([[0.00_36, 0.99_20, 0.00_45]] )
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        lowerCamelCase__ : Optional[Any] =torch.tensor([[7.1_8_9_0e-0_6, 9.9_9_9_4e-0_1, 5.6_5_5_9e-0_5]] )
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        lowerCamelCase__ : Any =torch.tensor([[1.0_3_2_0e-0_5, 9.9_9_9_3e-0_1, 6.2_4_3_5e-0_5]] )
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        lowerCamelCase__ : Optional[int] =torch.tensor([[4.1_3_7_7e-0_6, 9.9_9_9_0e-0_1, 9.8_3_8_6e-0_5]] )
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        lowerCamelCase__ : str =torch.tensor([[4.1_3_4_7e-0_5, 9.9_9_6_2e-0_1, 3.3_4_1_1e-0_4]] )
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        lowerCamelCase__ : Tuple =torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] )
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        lowerCamelCase__ : Optional[Any] =torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] )
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        lowerCamelCase__ : List[Any] =torch.tensor([[0.00_27, 0.99_04, 0.00_70]] )
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        lowerCamelCase__ : Dict =torch.tensor([[9.8_2_1_9e-0_4, 9.9_5_9_3e-0_1, 3.0_8_6_3e-0_3]] )
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        lowerCamelCase__ : Optional[Any] =torch.tensor([[3.5_0_8_2e-0_4, 9.9_7_8_5e-0_1, 1.7_9_6_6e-0_3]] )
    else:
        raise ValueError(f'''Model name {model_name} not supported''' )
    assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(__lowerCamelCase )
    if push_to_hub:
        print('''Pushing model, processor and slow tokenizer files to the hub...''' )
        model.push_to_hub(__lowerCamelCase , organization='''nielsr''' )
        processor.push_to_hub(__lowerCamelCase , organization='''nielsr''' )
        slow_tokenizer.push_to_hub(__lowerCamelCase , organization='''nielsr''' )
if __name__ == "__main__":
    # CLI entry point for the X-CLIP conversion script.
    _lowercase : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="xclip-base-patch32",
        type=str,
        help="Name of the model.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    _lowercase : int = parser.parse_args()
    # NOTE(review): `parser`, `args` and `convert_xclip_checkpoint` are read but never
    # bound under these names (collapsed to `_lowercase` / `snake_case__` by
    # obfuscation) — this guard raises NameError as written; verify before running.
    convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 625 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : str ):
"""simple docstring"""
return " ".join(
''''''.join(word[::-1] ) if len(__lowerCamelCase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
    # Script entry: run the module doctests, then demo the reversal on a sample.
    import doctest
    doctest.testmod()
    # NOTE(review): the function defined above is currently named `snake_case__`;
    # this call expects `reverse_long_words` — confirm the intended name.
    print(reverse_long_words("Hey wollef sroirraw"))
| 625 | 1 |
"""simple docstring"""
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of any k consecutive elements of `array`.

    Uses a sliding window in O(n) time.

    Raises:
        ValueError: if k is negative or larger than the array length.
    """
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    current_sum = sum(array[:k])
    max_sum = current_sum
    for i in range(len(array) - k):
        # Slide the window one step right: drop array[i], add array[i + k].
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
    # Run the doctests, then demo on random data.
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1000, 1000) for _ in range(100)]
    # NOTE(review): k is drawn from 0..110 while the array has 100 elements, so
    # the demo can deliberately trigger the "Invalid Input" ValueError.
    k = randint(0, 110)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array, k)}")
| 625 |
"""simple docstring"""
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count n-digit positive integers that are also an nth power (Project Euler 63 style).

    Considers bases in [1, max_base) and powers in [1, max_power).

    >>> solution(10, 22)
    49
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
    # Echo the call and its result via the f-string "=" debug specifier.
    # NOTE(review): the def above is currently named `snake_case__`; this
    # reference to `solution` only works if the def carries that name — confirm.
    print(f'{solution(1_0, 2_2) = }')
| 625 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger (uses the transformers logging wrapper imported above).
logger = logging.get_logger(__name__)

# Mapping from checkpoint name to the URL of its hosted config file.
ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    """Configuration for a RoFormer model.

    Stores the hyperparameters used to instantiate the model. Instantiating
    with the defaults yields a base-sized Chinese RoFormer configuration.
    """

    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        # Fall back to hidden_size when no separate embedding size is given.
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Whether rotary embeddings are also applied to the value projections.
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    """ONNX export configuration for RoFormer."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Return the dynamic-axis spec for each model input."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        # NOTE(review): this unconditional reassignment overrides the branch
        # above, so exports always use (batch, sequence) axes — confirm whether
        # the multiple-choice axes were ever meant to survive.
        dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 625 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    """Return `x` unchanged if it is iterable, otherwise duplicate it into a pair.

    Used to normalize scalar image/patch sizes into (h, w) tuples.
    """
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    """Shared checks for Flax vision-text dual-encoder model combinations.

    Concrete subclasses supply the model pair and fixtures via the three hook
    methods; this mixin implements the actual `check_*` / `test_*` logic.
    """

    def get_vision_text_model(self, config, text_config):
        # Hook: subclasses return an instantiated (vision_model, text_model) pair.
        pass

    def prepare_config_and_inputs(self):
        # Hook: subclasses return the kwargs consumed by the check_* methods.
        pass

    def get_pretrained_model_and_inputs(self):
        # Hook: subclasses return a pretrained model plus ready-to-use inputs.
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        """Fail unless the max absolute elementwise difference is within tol."""
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        # Building the dual encoder from the two sub-configs must yield
        # projection-dim embeddings for both modalities.
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        # Same shape check, but assembling the dual encoder from live sub-models.
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        # A save_pretrained/from_pretrained round trip must preserve outputs.
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        # Attention maps of both towers must have the expected count and shape.
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        """Check PT and Flax outputs match, in both weight-conversion directions."""
        pt_model.to(torch_device)
        pt_model.eval()
        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()
        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)
            fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
            self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
            for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
                self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)
            pt_model_loaded.to(torch_device)
            pt_model_loaded.eval()
            with torch.no_grad():
                pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
            self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
            for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
                self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        # Convert freshly initialized PT weights into the Flax model, then compare.
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state
        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        # Load Flax params into the PT model, then compare.
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")
        inputs_dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    """Dual-encoder checks with a Flax ViT vision tower and a Flax BERT text tower."""

    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    """Dual-encoder checks with a Flax CLIP vision tower and a Flax BERT text tower."""

    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    """End-to-end regression test against the public clip-italian checkpoint."""

    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )
        # Reference logits recorded from the released checkpoint.
        expected_logits = np.array([[1.2_284_727, 0.3_104_122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
| 625 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    """Fixture holder producing PoolFormer image-processor kwargs for tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        # Avoid mutable default arguments: materialize fallbacks here instead.
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct a PoolFormer image processor."""
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Unit tests for the PoolFormer image processor (PIL / numpy / torch inputs)."""

    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})
        # Integer overrides are expanded into the canonical dict shapes.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 625 |
"""simple docstring"""
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """0/1 knapsack by plain recursion: best value using items from `index` on.

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    """
    if index == number_of_items:
        return 0
    # Option 1: skip the current item.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item, if it still fits.
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
    # Run the knapsack doctests when executed as a script.
    import doctest
    doctest.testmod()
| 625 | 1 |
"""simple docstring"""
def snake_case__(word: str) -> str:
    """Convert ASCII lowercase letters in `word` to uppercase.

    Every other character (digits, punctuation, already-uppercase letters)
    is left untouched.
    """
    # ord(char) - 32 maps 'a'..'z' onto 'A'..'Z' in ASCII.
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
    # Run the module doctests when executed as a script.
    from doctest import testmod
    testmod()
| 625 |
"""simple docstring"""
# Pinned version specifiers for runtime and optional dependencies, keyed by
# package name; values are pip-style requirement strings.
# NOTE(review): the original annotation `Optional[Any]` had no visible typing
# import in scope; corrected to the plain built-in `dict`.
_lowercase : dict = {
    "Pillow": "Pillow<10.0.0",
    "accelerate": "accelerate>=0.20.3",
    "av": "av==9.2.0",
    "beautifulsoup4": "beautifulsoup4",
    "black": "black~=23.1",
    "codecarbon": "codecarbon==1.2.0",
    "cookiecutter": "cookiecutter==1.7.3",
    "dataclasses": "dataclasses",
    "datasets": "datasets!=2.5.0",
    "decord": "decord==0.6.0",
    "deepspeed": "deepspeed>=0.9.3",
    "diffusers": "diffusers",
    "dill": "dill<0.3.5",
    "evaluate": "evaluate>=0.2.0",
    "fairscale": "fairscale>0.3",
    "faiss-cpu": "faiss-cpu",
    "fastapi": "fastapi",
    "filelock": "filelock",
    "flax": "flax>=0.4.1,<=0.7.0",
    "ftfy": "ftfy",
    "fugashi": "fugashi>=1.0",
    "GitPython": "GitPython<3.1.19",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
    "importlib_metadata": "importlib_metadata",
    "ipadic": "ipadic>=1.0.0,<2.0",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
    "jaxlib": "jaxlib>=0.1.65,<=0.4.13",
    "jieba": "jieba",
    "kenlm": "kenlm",
    "keras-nlp": "keras-nlp>=0.3.1",
    "librosa": "librosa",
    "nltk": "nltk",
    "natten": "natten>=0.14.6",
    "numpy": "numpy>=1.17",
    "onnxconverter-common": "onnxconverter-common",
    "onnxruntime-tools": "onnxruntime-tools>=1.4.2",
    "onnxruntime": "onnxruntime>=1.4.0",
    "opencv-python": "opencv-python",
    "optuna": "optuna",
    "optax": "optax>=0.0.8,<=0.1.4",
    "packaging": "packaging>=20.0",
    "parameterized": "parameterized",
    "phonemizer": "phonemizer",
    "protobuf": "protobuf",
    "psutil": "psutil",
    "pyyaml": "pyyaml>=5.1",
    "pydantic": "pydantic<2",
    "pytest": "pytest>=7.2.0",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "python": "python>=3.8.0",
    "ray[tune]": "ray[tune]",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "rhoknp": "rhoknp>=1.1.0,<1.3.1",
    "rjieba": "rjieba",
    "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
    "ruff": "ruff>=0.0.241,<=0.0.259",
    "sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
    "sacremoses": "sacremoses",
    "safetensors": "safetensors>=0.3.1",
    "sagemaker": "sagemaker>=2.31.0",
    "scikit-learn": "scikit-learn",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "sigopt": "sigopt",
    "starlette": "starlette",
    "sudachipy": "sudachipy>=0.6.6",
    "sudachidict_core": "sudachidict_core>=20220729",
    "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
    "tensorflow": "tensorflow>=2.6,<2.14",
    "tensorflow-text": "tensorflow-text<2.14",
    "tf2onnx": "tf2onnx",
    "timeout-decorator": "timeout-decorator",
    "timm": "timm",
    "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
    "torch": "torch>=1.9,!=1.12.0",
    "torchaudio": "torchaudio",
    "torchvision": "torchvision",
    "pyctcdecode": "pyctcdecode>=0.4.0",
    "tqdm": "tqdm>=4.27",
    "unidic": "unidic>=1.0.2",
    "unidic_lite": "unidic_lite>=1.0.7",
    "urllib3": "urllib3<2.0.0",
    "uvicorn": "uvicorn",
}
| 625 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
# Module logger (uses the transformers logging wrapper imported above).
logger = logging.get_logger(__name__)

# On-disk vocabulary file names expected by the tokenizer; referenced by the
# tokenizer class below as VOCAB_FILES_NAMES.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

# Maximum model input sizes (positional embeddings) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
@lru_cache()
def bytes_to_unicode():
    """Return a mapping from byte values (0-255) to printable unicode characters.

    Printable latin-1 bytes map to themselves; every remaining byte is shifted
    past 0xFF so each of the 256 bytes gets a distinct, visible character.
    Cached since the table is fixed.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def snake_case__ ( __lowerCamelCase ):
    """Return the set of adjacent symbol pairs occurring in a word.

    Args:
        __lowerCamelCase: a word represented as a sequence (usually a tuple)
            of symbols, each symbol being a variable-length string.

    Returns:
        set[tuple]: every pair of consecutive symbols in the word.
    """
    # Defect fixed: the mangled original assigned to `lowerCamelCase__` but
    # read back `pairs`/`prev_char`/`word`, raising NameError. The wrong
    # `: int` annotation on the sequence parameter is dropped as well.
    pairs = set()
    prev_char = __lowerCamelCase[0]
    for char in __lowerCamelCase[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    """Byte-level BPE tokenizer in the LED/BART style: GPT-2 byte encoding,
    BPE merge ranks loaded from a merges file, RoBERTa-like special-token
    layout, plus `_pad` support for LED's ``global_attention_mask``.

    NOTE(review): this file is machine-mangled — the four class attributes are
    all bound to ``_a`` (later ones shadow earlier), every method is named
    ``snake_case`` (only the last survives on the class), and many locals are
    assigned to ``lowerCamelCase__`` but read back under their original
    upstream names, so most methods raise NameError as written. Comments below
    describe apparent upstream intent; confirm against the original source.
    """
    _a = VOCAB_FILES_NAMES
    _a = PRETRAINED_VOCAB_FILES_MAP
    _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _a = ['input_ids', 'attention_mask']
    # Loads vocab + BPE merges from disk, builds byte encoder/decoder tables
    # and normalises all special tokens to AddedToken instances.
    def __init__( self : str, lowerCamelCase : List[Any], lowerCamelCase : int, lowerCamelCase : Optional[Any]="replace", lowerCamelCase : List[str]="<s>", lowerCamelCase : Union[str, Any]="</s>", lowerCamelCase : List[Any]="</s>", lowerCamelCase : Tuple="<s>", lowerCamelCase : int="<unk>", lowerCamelCase : Optional[int]="<pad>", lowerCamelCase : Tuple="<mask>", lowerCamelCase : Tuple=False, **lowerCamelCase : Optional[int], )-> Optional[Any]:
        lowerCamelCase__ : Optional[Any] =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else bos_token
        lowerCamelCase__ : str =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else eos_token
        lowerCamelCase__ : Optional[Any] =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else sep_token
        lowerCamelCase__ : Dict =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else cls_token
        lowerCamelCase__ : int =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else unk_token
        lowerCamelCase__ : List[str] =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCamelCase__ : Any =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else mask_token
        super().__init__(
            errors=lowerCamelCase, bos_token=lowerCamelCase, eos_token=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, cls_token=lowerCamelCase, pad_token=lowerCamelCase, mask_token=lowerCamelCase, add_prefix_space=lowerCamelCase, **lowerCamelCase, )
        with open(lowerCamelCase, encoding='''utf-8''' ) as vocab_handle:
            lowerCamelCase__ : Tuple =json.load(lowerCamelCase )
        lowerCamelCase__ : Optional[int] ={v: k for k, v in self.encoder.items()}
        lowerCamelCase__ : Tuple =errors  # how to handle errors in decoding
        lowerCamelCase__ : Optional[Any] =bytes_to_unicode()
        lowerCamelCase__ : Optional[Any] ={v: k for k, v in self.byte_encoder.items()}
        with open(lowerCamelCase, encoding='''utf-8''' ) as merges_handle:
            lowerCamelCase__ : Any =merges_handle.read().split('''\n''' )[1:-1]
        lowerCamelCase__ : Optional[Any] =[tuple(merge.split() ) for merge in bpe_merges]
        lowerCamelCase__ : List[str] =dict(zip(lowerCamelCase, range(len(lowerCamelCase ) ) ) )
        lowerCamelCase__ : Optional[Any] ={}
        lowerCamelCase__ : Optional[int] =add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        lowerCamelCase__ : Optional[Any] =re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    # Number of entries in the base vocabulary (added tokens excluded).
    def snake_case ( self : str )-> List[Any]:
        return len(self.encoder )
    # Full vocab: base encoder merged with the added-tokens encoder.
    def snake_case ( self : Optional[Any] )-> Optional[Any]:
        return dict(self.encoder, **self.added_tokens_encoder )
    # Apply BPE to one token: repeatedly merge the lowest-ranked adjacent
    # symbol pair until no ranked pair remains; results are memoised in a cache.
    def snake_case ( self : Optional[int], lowerCamelCase : Union[str, Any] )-> Tuple:
        if token in self.cache:
            return self.cache[token]
        lowerCamelCase__ : Tuple =tuple(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =get_pairs(lowerCamelCase )
        if not pairs:
            return token
        while True:
            lowerCamelCase__ : List[Any] =min(lowerCamelCase, key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase, float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            lowerCamelCase__ , lowerCamelCase__ : List[Any] =bigram
            lowerCamelCase__ : Tuple =[]
            lowerCamelCase__ : List[str] =0
            while i < len(lowerCamelCase ):
                try:
                    lowerCamelCase__ : str =word.index(lowerCamelCase, lowerCamelCase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    lowerCamelCase__ : str =j
                if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            lowerCamelCase__ : Optional[int] =tuple(lowerCamelCase )
            lowerCamelCase__ : int =new_word
            if len(lowerCamelCase ) == 1:
                break
            else:
                lowerCamelCase__ : Dict =get_pairs(lowerCamelCase )
        lowerCamelCase__ : Tuple =''' '''.join(lowerCamelCase )
        lowerCamelCase__ : List[Any] =word
        return word
    # Tokenise raw text: regex-split, byte-encode each piece, then BPE-split.
    def snake_case ( self : List[Any], lowerCamelCase : List[str] )-> List[Any]:
        lowerCamelCase__ : Tuple =[]
        for token in re.findall(self.pat, lowerCamelCase ):
            lowerCamelCase__ : str =''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase ).split(''' ''' ) )
        return bpe_tokens
    # Token string -> vocabulary id (falls back to the unk token's id).
    def snake_case ( self : List[Any], lowerCamelCase : str )-> str:
        return self.encoder.get(lowerCamelCase, self.encoder.get(self.unk_token ) )
    # Vocabulary id -> token string.
    def snake_case ( self : Optional[Any], lowerCamelCase : Any )-> Union[str, Any]:
        return self.decoder.get(lowerCamelCase )
    # Token strings -> original text via the reverse byte mapping.
    def snake_case ( self : Optional[Any], lowerCamelCase : int )-> List[Any]:
        lowerCamelCase__ : Dict =''''''.join(lowerCamelCase )
        lowerCamelCase__ : List[str] =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''', errors=self.errors )
        return text
    # Write vocab.json and merges.txt into `save_directory`.
    def snake_case ( self : List[Any], lowerCamelCase : str, lowerCamelCase : Optional[str] = None )-> Tuple[str]:
        if not os.path.isdir(lowerCamelCase ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        lowerCamelCase__ : Dict =os.path.join(
            lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        lowerCamelCase__ : List[str] =os.path.join(
            lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(lowerCamelCase, '''w''', encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=lowerCamelCase, ensure_ascii=lowerCamelCase ) + '''\n''' )
        lowerCamelCase__ : Union[str, Any] =0
        with open(lowerCamelCase, '''w''', encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda lowerCamelCase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''' )
                    lowerCamelCase__ : List[Any] =token_index
                writer.write(''' '''.join(lowerCamelCase ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
    # <s> A </s> for one sequence; <s> A </s></s> B </s> for a pair.
    def snake_case ( self : Optional[int], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None )-> List[int]:
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowerCamelCase__ : Dict =[self.cls_token_id]
        lowerCamelCase__ : Any =[self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    # 1 where a special token sits, 0 elsewhere.
    def snake_case ( self : Tuple, lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None, lowerCamelCase : bool = False )-> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowerCamelCase, token_ids_a=lowerCamelCase, already_has_special_tokens=lowerCamelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(lowerCamelCase )) + [1]
        return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
    # LED (like RoBERTa) does not use token type ids: always all zeros.
    def snake_case ( self : Dict, lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None )-> List[int]:
        lowerCamelCase__ : int =[self.sep_token_id]
        lowerCamelCase__ : Optional[int] =[self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    # Optionally prepend a space so the first word is BPE-encoded consistently.
    def snake_case ( self : int, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[Any]=False, **lowerCamelCase : Tuple )-> List[Any]:
        lowerCamelCase__ : Union[str, Any] =kwargs.pop('''add_prefix_space''', self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase ) > 0 and not text[0].isspace()):
            lowerCamelCase__ : List[Any] =''' ''' + text
        return (text, kwargs)
    # Standard padding plus LED-specific padding of `global_attention_mask`,
    # which is padded with -1 ("local attention"), not 0.
    def snake_case ( self : List[str], lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding], lowerCamelCase : Optional[int] = None, lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[bool] = None, )-> dict:
        lowerCamelCase__ : str =super()._pad(
            encoded_inputs=lowerCamelCase, max_length=lowerCamelCase, padding_strategy=lowerCamelCase, pad_to_multiple_of=lowerCamelCase, return_attention_mask=lowerCamelCase, )
        # Load from model defaults
        if return_attention_mask is None:
            lowerCamelCase__ : Dict ='''attention_mask''' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            lowerCamelCase__ : Tuple =encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            lowerCamelCase__ : Optional[Any] =len(encoded_inputs['''global_attention_mask'''] ) != len(lowerCamelCase )
            if needs_to_be_padded:
                lowerCamelCase__ : Tuple =len(lowerCamelCase ) - len(encoded_inputs['''global_attention_mask'''] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    lowerCamelCase__ : Any =(
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    lowerCamelCase__ : Dict =[-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
        return encoded_inputs
| 625 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : list[int] ):
    """Return the maximum product over all contiguous subarrays of the input.

    Uses the classic running max/min technique: a negative element swaps the
    roles of the running maximum and minimum products, since multiplying the
    smallest (most negative) product by a negative number yields the largest.

    Args:
        __lowerCamelCase: list or tuple of integers; an empty input yields 0.

    Returns:
        int: the maximum contiguous-subarray product.

    Raises:
        ValueError: if the input is not a list/tuple of integers.
    """
    if not __lowerCamelCase:
        return 0
    # Defect fixed: element validation compared `isinstance(numbers, numbers)`
    # and never inspected the elements; check each element is an int.
    if not isinstance(__lowerCamelCase , (list, tuple) ) or not all(
        isinstance(number , int ) for number in __lowerCamelCase ):
        raise ValueError('''numbers must be an iterable of integers''' )
    # Defect fixed: the mangled original assigned every local to
    # `lowerCamelCase__` while reading `max_till_now`/`min_till_now`/`max_prod`,
    # raising NameError on any input with two or more elements.
    max_till_now = min_till_now = max_prod = __lowerCamelCase[0]
    for i in range(1 , len(__lowerCamelCase ) ):
        # update the maximum and minimum subarray products
        number = __lowerCamelCase[i]
        if number < 0:
            # A negative factor turns the smallest product into the largest.
            max_till_now , min_till_now = min_till_now , max_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
    return max_prod
| 625 | 1 |
"""simple docstring"""
import requests
_lowercase : List[str] = "YOUR API KEY"  # Giphy API key placeholder — replace before use (upstream name: `giphy_api_key`)
def snake_case__ ( query: str , api_key: str = _lowercase ):
    """Search the Giphy API and return the URLs of all matching GIFs.

    Defects fixed: the original signature declared BOTH parameters as
    ``__lowerCamelCase`` (a duplicate-argument SyntaxError) while the body
    already read ``query`` and ``api_key``; the default also referenced the
    undefined name ``giphy_api_key`` instead of the module constant
    ``_lowercase`` defined above.

    Args:
        query: free-text search terms (whitespace is joined with ``+``).
        api_key: Giphy API key; defaults to the module-level key constant.

    Returns:
        list[str]: URL of every GIF in the response's ``data`` payload.
    """
    formatted_query = '''+'''.join(query.split() )
    url = f'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
    # Network call: no timeout/error handling, matching the original behaviour.
    gifs = requests.get(url ).json()['''data''']
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
    # Defect fixed: the original called the undefined name `get_gifs`; the
    # search helper in this module is named `snake_case__`.
    print("\n".join(snake_case__("space ship")))
| 625 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    """Output container returned by the Flax ControlNet forward pass.

    NOTE(review): both fields were mangled to ``_a = 42`` (the second shadows
    the first); upstream these are the per-down-block residual samples and the
    mid-block residual sample, annotated as jnp.ndarray — confirm against the
    original source.
    """
    _a = 42
    _a = 42
class __SCREAMING_SNAKE_CASE ( nn.Module ):
    """Small convolutional network that embeds the conditioning image (e.g.
    an edge map or pose map) into the UNet's feature space, downsampling it
    with stride-2 convolutions and finishing with a zero-initialised output
    projection.

    NOTE(review): locals in `setup` are assigned to ``lowerCamelCase__`` but
    read back under their upstream name ``blocks``, so this raises NameError
    as written; class attributes are mangled to ``_a`` and shadow each other.
    """
    _a = 42
    _a = (1_6, 3_2, 9_6, 2_5_6)
    _a = jnp.floataa
    # Build conv_in, alternating stride-1 / stride-2 conv pairs, and the
    # zero-init output conv (zero init keeps the ControlNet a no-op at start).
    def snake_case ( self : Tuple )-> int:
        lowerCamelCase__ : Tuple =nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        lowerCamelCase__ : Dict =[]
        for i in range(len(self.block_out_channels ) - 1 ):
            lowerCamelCase__ : Dict =self.block_out_channels[i]
            lowerCamelCase__ : Dict =self.block_out_channels[i + 1]
            lowerCamelCase__ : List[str] =nn.Conv(
                lowerCamelCase, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(lowerCamelCase )
            lowerCamelCase__ : Optional[int] =nn.Conv(
                lowerCamelCase, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(lowerCamelCase )
        lowerCamelCase__ : Any =blocks
        lowerCamelCase__ : Optional[int] =nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
    # Apply conv_in + silu, each intermediate conv + silu, then conv_out.
    def __call__( self : Any, lowerCamelCase : int )-> List[str]:
        lowerCamelCase__ : Tuple =self.conv_in(lowerCamelCase )
        lowerCamelCase__ : Dict =nn.silu(lowerCamelCase )
        for block in self.blocks:
            lowerCamelCase__ : str =block(lowerCamelCase )
            lowerCamelCase__ : List[str] =nn.silu(lowerCamelCase )
        lowerCamelCase__ : Any =self.conv_out(lowerCamelCase )
        return embedding
@flax_register_to_config
class __SCREAMING_SNAKE_CASE ( nn.Module , lowerCAmelCase_ , lowerCAmelCase_ ):
    """Flax ControlNet: a UNet-style encoder that consumes a latent sample, a
    timestep, text-encoder hidden states and a conditioning image, and returns
    residual tensors (one per down-block plus one for the mid-block) to be
    added into a diffusion UNet's skip connections.

    NOTE(review): heavily machine-mangled — config attributes are all bound to
    ``_a`` (shadowing each other) and most locals are assigned to
    ``lowerCamelCase__`` but read back under their upstream names, so several
    methods raise NameError as written. Comments describe apparent intent.
    """
    _a = 3_2
    _a = 4
    _a = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    _a = False
    _a = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
    _a = 2
    _a = 8
    _a = None
    _a = 1_2_8_0
    _a = 0.0
    _a = False
    _a = jnp.floataa
    _a = True
    _a = 0
    _a = "rgb"
    _a = (1_6, 3_2, 9_6, 2_5_6)
    # Initialise parameters by running `init` with dummy zero tensors of the
    # configured sample / conditioning shapes.
    def snake_case ( self : str, lowerCamelCase : jax.random.KeyArray )-> FrozenDict:
        # init input tensors
        lowerCamelCase__ : int =(1, self.in_channels, self.sample_size, self.sample_size)
        lowerCamelCase__ : int =jnp.zeros(lowerCamelCase, dtype=jnp.floataa )
        lowerCamelCase__ : Union[str, Any] =jnp.ones((1,), dtype=jnp.intaa )
        lowerCamelCase__ : str =jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa )
        lowerCamelCase__ : Any =(1, 3, self.sample_size * 8, self.sample_size * 8)
        lowerCamelCase__ : Optional[Any] =jnp.zeros(lowerCamelCase, dtype=jnp.floataa )
        lowerCamelCase__ , lowerCamelCase__ : List[Any] =jax.random.split(lowerCamelCase )
        lowerCamelCase__ : Dict ={'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )["params"]
    # Build all submodules: conv_in, time embedding, conditioning embedding,
    # down blocks with zero-init 1x1 controlnet projections, and the mid block.
    def snake_case ( self : Any )-> Tuple:
        lowerCamelCase__ : Optional[int] =self.block_out_channels
        lowerCamelCase__ : Tuple =block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        lowerCamelCase__ : List[Any] =self.num_attention_heads or self.attention_head_dim
        # input
        lowerCamelCase__ : int =nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        # time
        lowerCamelCase__ : str =FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
        lowerCamelCase__ : Dict =FlaxTimestepEmbedding(lowerCamelCase, dtype=self.dtype )
        lowerCamelCase__ : List[Any] =FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )
        lowerCamelCase__ : Dict =self.only_cross_attention
        if isinstance(lowerCamelCase, lowerCamelCase ):
            lowerCamelCase__ : int =(only_cross_attention,) * len(self.down_block_types )
        if isinstance(lowerCamelCase, lowerCamelCase ):
            lowerCamelCase__ : List[str] =(num_attention_heads,) * len(self.down_block_types )
        # down
        lowerCamelCase__ : Union[str, Any] =[]
        lowerCamelCase__ : Dict =[]
        lowerCamelCase__ : List[Any] =block_out_channels[0]
        lowerCamelCase__ : List[Any] =nn.Conv(
            lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
        controlnet_down_blocks.append(lowerCamelCase )
        for i, down_block_type in enumerate(self.down_block_types ):
            lowerCamelCase__ : List[Any] =output_channel
            lowerCamelCase__ : str =block_out_channels[i]
            lowerCamelCase__ : Dict =i == len(lowerCamelCase ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                lowerCamelCase__ : str =FlaxCrossAttnDownBlockaD(
                    in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
            else:
                lowerCamelCase__ : List[Any] =FlaxDownBlockaD(
                    in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
            down_blocks.append(lowerCamelCase )
            for _ in range(self.layers_per_block ):
                lowerCamelCase__ : Any =nn.Conv(
                    lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(lowerCamelCase )
            if not is_final_block:
                lowerCamelCase__ : Any =nn.Conv(
                    lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(lowerCamelCase )
        lowerCamelCase__ : int =down_blocks
        lowerCamelCase__ : List[str] =controlnet_down_blocks
        # mid
        lowerCamelCase__ : Tuple =block_out_channels[-1]
        lowerCamelCase__ : List[Any] =FlaxUNetMidBlockaDCrossAttn(
            in_channels=lowerCamelCase, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )
        lowerCamelCase__ : List[str] =nn.Conv(
            lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
    # Forward pass: embed time + conditioning, run down/mid blocks, project
    # each residual through its zero-init conv and scale by conditioning_scale.
    def __call__( self : int, lowerCamelCase : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : str, lowerCamelCase : float = 1.0, lowerCamelCase : bool = True, lowerCamelCase : bool = False, )-> Union[FlaxControlNetOutput, Tuple]:
        lowerCamelCase__ : int =self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            lowerCamelCase__ : int =jnp.flip(lowerCamelCase, axis=1 )
        # 1. time
        if not isinstance(lowerCamelCase, jnp.ndarray ):
            lowerCamelCase__ : Any =jnp.array([timesteps], dtype=jnp.intaa )
        elif isinstance(lowerCamelCase, jnp.ndarray ) and len(timesteps.shape ) == 0:
            lowerCamelCase__ : List[str] =timesteps.astype(dtype=jnp.floataa )
            lowerCamelCase__ : int =jnp.expand_dims(lowerCamelCase, 0 )
        lowerCamelCase__ : Optional[Any] =self.time_proj(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =self.time_embedding(lowerCamelCase )
        # 2. pre-process
        lowerCamelCase__ : Optional[int] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) )
        lowerCamelCase__ : Dict =self.conv_in(lowerCamelCase )
        lowerCamelCase__ : List[str] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) )
        lowerCamelCase__ : int =self.controlnet_cond_embedding(lowerCamelCase )
        sample += controlnet_cond
        # 3. down
        lowerCamelCase__ : Union[str, Any] =(sample,)
        for down_block in self.down_blocks:
            if isinstance(lowerCamelCase, lowerCamelCase ):
                lowerCamelCase__ , lowerCamelCase__ : Dict =down_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train )
            else:
                lowerCamelCase__ , lowerCamelCase__ : Tuple =down_block(lowerCamelCase, lowerCamelCase, deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        lowerCamelCase__ : Optional[int] =self.mid_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train )
        # 5. contronet blocks
        lowerCamelCase__ : Optional[Any] =()
        for down_block_res_sample, controlnet_block in zip(lowerCamelCase, self.controlnet_down_blocks ):
            lowerCamelCase__ : Union[str, Any] =controlnet_block(lowerCamelCase )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        lowerCamelCase__ : List[str] =controlnet_down_block_res_samples
        lowerCamelCase__ : List[str] =self.controlnet_mid_block(lowerCamelCase )
        # 6. scaling
        lowerCamelCase__ : Union[str, Any] =[sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=lowerCamelCase, mid_block_res_sample=lowerCamelCase )
| 625 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()  # force deterministic torch/CUDA ops so the pixel-slice assertions below are reproducible
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    """Fast CPU tests for the Kandinsky 2.2 image-to-image pipeline using tiny
    randomly-initialised components.

    NOTE(review): mangled file — class attributes are all bound to ``_a``
    (later ones shadow earlier) and every method is named ``snake_case`` (only
    the last survives); upstream these are distinct properties/fixtures.
    Comments below describe the apparent upstream role of each member.
    """
    _a = KandinskyVaaImgaImgPipeline
    _a = ['image_embeds', 'negative_image_embeds', 'image']
    _a = [
        'image_embeds',
        'negative_image_embeds',
        'image',
    ]
    _a = [
        'generator',
        'height',
        'width',
        'strength',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    _a = False
    @property
    # Text-embedder hidden size used by the dummy components.
    def snake_case ( self : Optional[Any] )-> List[Any]:
        return 32
    @property
    # Time-embedding input dimension.
    def snake_case ( self : Optional[Any] )-> int:
        return 32
    @property
    # Block output channel count mirrors the time input dim.
    def snake_case ( self : List[str] )-> Any:
        return self.time_input_dim
    @property
    # Time-embedding projection dimension.
    def snake_case ( self : Tuple )-> Dict:
        return self.time_input_dim * 4
    @property
    # Cross-attention dimension of the dummy UNet.
    def snake_case ( self : int )-> Dict:
        return 100
    @property
    # Tiny seeded UNet2DConditionModel used as the pipeline's denoiser.
    def snake_case ( self : str )-> Optional[int]:
        torch.manual_seed(0 )
        lowerCamelCase__ : Optional[int] ={
            '''in_channels''': 4,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''image''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        lowerCamelCase__ : Union[str, Any] =UNetaDConditionModel(**lowerCamelCase )
        return model
    @property
    # Constructor kwargs for the tiny VQ decoder (movq).
    def snake_case ( self : Tuple )-> Optional[Any]:
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    # Tiny seeded VQModel instance.
    def snake_case ( self : Dict )-> Any:
        torch.manual_seed(0 )
        lowerCamelCase__ : Dict =VQModel(**self.dummy_movq_kwargs )
        return model
    # Assemble the pipeline components: dummy unet, a DDIM scheduler and movq.
    def snake_case ( self : Dict )-> Any:
        lowerCamelCase__ : Dict =self.dummy_unet
        lowerCamelCase__ : int =self.dummy_movq
        lowerCamelCase__ : str ={
            '''num_train_timesteps''': 1000,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.00_085,
            '''beta_end''': 0.012,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        lowerCamelCase__ : Dict =DDIMScheduler(**lowerCamelCase )
        lowerCamelCase__ : int ={
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components
    # Deterministic pipeline call kwargs: random image embeds, a 256x256 init
    # image and a seeded generator (mps needs a CPU-seeded generator).
    def snake_case ( self : Optional[int], lowerCamelCase : Optional[int], lowerCamelCase : Optional[int]=0 )-> List[Any]:
        lowerCamelCase__ : str =floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
        lowerCamelCase__ : List[Any] =floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
            lowerCamelCase )
        # create init_image
        lowerCamelCase__ : Optional[int] =floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
        lowerCamelCase__ : Dict =image.cpu().permute(0, 2, 3, 1 )[0]
        lowerCamelCase__ : str =Image.fromarray(np.uinta(lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
        if str(lowerCamelCase ).startswith('''mps''' ):
            lowerCamelCase__ : Optional[Any] =torch.manual_seed(lowerCamelCase )
        else:
            lowerCamelCase__ : List[str] =torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
        lowerCamelCase__ : int ={
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs
    # Run the pipeline on CPU and compare an output pixel slice (and the
    # tuple-return variant) against golden values.
    def snake_case ( self : Optional[Any] )-> Optional[Any]:
        lowerCamelCase__ : str ='''cpu'''
        lowerCamelCase__ : str =self.get_dummy_components()
        lowerCamelCase__ : List[str] =self.pipeline_class(**lowerCamelCase )
        lowerCamelCase__ : Dict =pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] =pipe(**self.get_dummy_inputs(lowerCamelCase ) )
        lowerCamelCase__ : int =output.images
        lowerCamelCase__ : Optional[Any] =pipe(
            **self.get_dummy_inputs(lowerCamelCase ), return_dict=lowerCamelCase, )[0]
        lowerCamelCase__ : List[Any] =image[0, -3:, -3:, -1]
        lowerCamelCase__ : Optional[int] =image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowerCamelCase__ : Any =np.array(
            [0.6_199_778, 0.63_984_406, 0.46_145_785, 0.62_944_984, 0.5_622_215, 0.47_306_132, 0.47_441_456, 0.4_607_606, 0.48_719_263] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """GPU integration test: runs the real Kandinsky 2.2 prior + decoder
    img2img pipelines against a reference output image.

    NOTE(review): both methods are named ``snake_case`` (mangled), so the
    teardown shadows nothing but the test name is lost; upstream these are
    ``tearDown`` and the img2img integration test.
    """
    # Upstream tearDown: release VRAM between tests.
    def snake_case ( self : int )-> Dict:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    # Full pipeline run: prior produces image embeds from the prompt, decoder
    # transforms the cat image; result compared to a stored golden .npy.
    def snake_case ( self : Optional[int] )-> List[Any]:
        lowerCamelCase__ : Any =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
        lowerCamelCase__ : List[str] =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        lowerCamelCase__ : Union[str, Any] ='''A red cartoon frog, 4k'''
        lowerCamelCase__ : Union[str, Any] =KandinskyVaaPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''', torch_dtype=torch.floataa )
        pipe_prior.to(lowerCamelCase )
        lowerCamelCase__ : Any =KandinskyVaaImgaImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder''', torch_dtype=torch.floataa )
        lowerCamelCase__ : List[Any] =pipeline.to(lowerCamelCase )
        pipeline.set_progress_bar_config(disable=lowerCamelCase )
        lowerCamelCase__ : Optional[int] =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase__ , lowerCamelCase__ : int =pipe_prior(
            lowerCamelCase, generator=lowerCamelCase, num_inference_steps=5, negative_prompt='''''', ).to_tuple()
        lowerCamelCase__ : Optional[int] =pipeline(
            image=lowerCamelCase, image_embeds=lowerCamelCase, negative_image_embeds=lowerCamelCase, generator=lowerCamelCase, num_inference_steps=100, height=768, width=768, strength=0.2, output_type='''np''', )
        lowerCamelCase__ : Tuple =output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )
| 625 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import machinery for the CLIP subpackage: `_import_structure` maps
# submodule names to their public symbols; optional backends (tokenizers,
# vision, torch, tf, flax) are probed with try/except and only added when
# available. Under TYPE_CHECKING the real imports run so static analysers see
# the full API; at runtime a _LazyModule defers all imports.
_lowercase : Optional[Any] = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}
# Fast tokenizer requires the `tokenizers` backend.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : str = ["CLIPTokenizerFast"]
# Image processing requires the vision backend (PIL).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : Any = ["CLIPFeatureExtractor"]
    _lowercase : int = ["CLIPImageProcessor"]
# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : Optional[Any] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]
# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : Dict = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]
# Flax models.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase : Union[str, Any] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]
# NOTE(review): the backend branches above assign to the mangled name
# `_lowercase` instead of extending `_import_structure`, so their symbol lists
# are discarded — upstream each appends to `_import_structure[...]`.
if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazily-importing proxy.
    import sys
    _lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 625 | 1 |
"""simple docstring"""
_lowercase : int = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 625 |
"""simple docstring"""
import os
def solution(filename: str = "p022_names.txt") -> int:
    """Solve Project Euler problem 22: total of all name scores in a file.

    Each name's alphabetical value (A=1 ... Z=26, names are uppercase) is
    multiplied by its 1-based position in the alphabetically sorted list.

    :param filename: comma-separated, double-quoted name list; resolved
        relative to this module's directory unless an absolute path is given.
    :return: the sum of all name scores.
    """
    # NOTE: the original passed an undefined name to os.path.dirname where
    # __file__ belongs, which raised NameError at runtime.
    with open(os.path.join(os.path.dirname(__file__), filename)) as file:
        names = file.readlines()[0].replace('"', "").split(",")

    names.sort()

    total_score = 0
    for position, name in enumerate(names, start=1):
        # Alphabetical value: ord("A") == 65, so subtract 64 to make A=1.
        name_score = sum(ord(letter) - 64 for letter in name)
        total_score += position * name_score
    return total_score


if __name__ == "__main__":
    print(solution())
| 625 | 1 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    """Distribution transformed by ``y = loc + scale * x``.

    Wraps ``base_distribution`` so samples and moments are affinely rescaled
    (e.g. to undo input normalization on model outputs).  ``loc`` defaults to
    0 and ``scale`` to 1, i.e. the identity transform.
    """

    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the affinely transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the affinely transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation (square root of the variance)."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    """Project features to the (constrained) parameters of a distribution.

    One ``nn.Linear`` per distribution argument maps ``in_features`` to the
    width given in ``args_dim``; ``domain_map`` then constrains each raw
    projection to its valid domain (e.g. a positive scale).
    """

    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # One linear head per distribution argument, in args_dim order.
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable as an ``nn.Module``.

    Lets a plain function (e.g. a ``domain_map``) live inside a module
    hierarchy and be applied in a forward pass.
    """

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    """Base class describing how model outputs parameterize a distribution.

    Subclasses set ``distribution_class`` and ``args_dim`` and implement
    ``domain_map`` to constrain raw projections to valid parameter domains.
    """

    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        # Scale every argument width by the output dimension.
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        """Build the underlying distribution; wrap in Independent for dim > 1."""
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        """Return the distribution, affinely transformed when loc/scale are given."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event: () for scalar, (dim,) otherwise."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions (length of ``event_shape``)."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value guaranteed to lie in the distribution's support."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the module mapping ``in_features`` to this distribution's parameters."""
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map), )

    def domain_map(self, *args: torch.Tensor):
        """Constrain raw projections to valid parameter domains (subclass hook)."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Smooth positivity map: (x + sqrt(x^2 + 4)) / 2, a softplus alternative."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    """Student-T distribution output: parameters ``df``, ``loc``, ``scale``."""

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        # scale must be strictly positive; clamp to the dtype's epsilon.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        # df > 2 keeps the variance finite.
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    """Normal (Gaussian) distribution output: parameters ``loc``, ``scale``."""

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        # scale must be strictly positive; clamp to the dtype's epsilon.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """Negative-binomial output (non-negative counts): ``total_count``, ``logits``."""

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        # total_count must be positive; logits are unconstrained.
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # Overrides the parent: counts cannot be affinely rescaled, so the scale
    # is folded into the logits instead.  Note: ``logits`` is modified
    # in place when ``scale`` is given.
    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
| 625 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A binary-tree node holding an integer ``value`` and optional children."""

    def __init__(self, value: int) -> None:
        # The original bound locals instead of instance attributes, so
        # node.value / node.left / node.right raised AttributeError.
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class BinaryTreeNodeSum:
    """Iterable over a binary tree that yields the sum of all node values."""

    def __init__(self, tree: Node) -> None:
        # The original bound a local instead of self.tree, so __iter__
        # raised AttributeError.
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        """Recursively sum ``node`` and both subtrees; an empty subtree is 0."""
        if node is None:
            return 0

        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 625 | 1 |
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def snake_case__ ( *args , take_from: Optional[Union[Dict, Any]] = None , standard_warn: bool = True , stacklevel: int = 2 ):
    """Deprecation helper for ``(attribute, version_name, message)`` tuples.

    For each tuple, the deprecated value is popped from ``take_from`` (when it
    is a kwargs dict), read from it as an attribute, or simply warned about
    when ``take_from`` is None.  A ``FutureWarning`` is emitted; if the
    library version has already reached ``version_name``, a ``ValueError``
    demands the deprecation be removed.  Any leftover keys in a dict
    ``take_from`` raise ``TypeError`` like an unexpected keyword argument.

    Returns the recovered value(s): nothing, a single value, or a tuple.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
                f''' version {__version__} is >= {version_name}''' )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
        elif deprecated_kwargs is None:
            warning = f'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
        if warning is not None:
            warning = warning + ''' ''' if standard_warn else ''''''
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items() ) )
        # Restore the caller's filename in the message (it was computed but unused).
        raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
| 625 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
# Module-level logger; the helpers below all log through this name.
logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    """Save ``model`` into ``dirpath``, replacing any previous checkpoint.

    If ``dirpath`` exists and contains a ``config.json`` / ``pytorch_model.bin``
    pair, those files are removed first; otherwise the directory is created.
    The model is then written with ``save_pretrained``.
    """
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Shannon entropy over the last dimension of a probability tensor ``p``.

    Zero entries contribute zero (the 0*log(0) terms are masked out).  With
    ``unlogit=True``, ``p`` is squared first before the entropy is computed.
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    # 0 * log(0) produces NaN; by convention it contributes 0 to the entropy.
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_ad_tensor(tensor):
    """Log a 2D tensor through ``logger``, one row per layer, tab-separated.

    Float tensors are printed with 5 decimals; long tensors as plain ints.
    (The original mixed the parameter name with an undefined ``tensor``.)
    """
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute per-head attention entropy and head-importance scores.

    Importance is the accumulated absolute gradient of the loss w.r.t. a
    (ones) head mask; entropy is averaged over tokens.  Returns
    ``(attn_entropy, head_importance, total_loss)``.
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    # Rank heads from most to least important (0 = most important).
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively mask the least-important heads until the score drops below
    ``args.masking_threshold`` of the original, then return the head mask.

    The score is 1/loss (language-modeling loss used as the downstream metric).
    The final mask is saved to ``head_mask.npy`` in ``args.output_dir``.
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually prune the heads zeroed in ``head_mask`` and report the effect.

    Measures score (1/loss), parameter count, and wall-clock time before and
    after ``model.prune_heads``, then saves the pruned model to
    ``args.output_dir``.
    """
    # Measure score and timing with the mask only (no weights removed yet).
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    # Map each layer to the list of head indices that are masked (== 0).
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            # squeeze() collapsed a single head to a scalar; re-wrap as a list.
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    """CLI entry point: compute head importance for a GPT-2 LM and optionally
    mask and prune the least-important attention heads."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    # NOTE: the original used np.intaa, which is not a numpy attribute; the
    # token-id matrix must be integer (int64) so torch.from_numpy yields longs.
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
| 625 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
_lowercase : Union[str, Any] = "Create a default config file for Accelerate with only a few flags set."
def write_basic_config(mixed_precision: str = "no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """Write a minimal Accelerate cluster config for the local machine.

    Detects CUDA / XPU / NPU availability and fills in the number of
    processes and distributed type accordingly.

    :param mixed_precision: one of "no", "fp16", "bf16", "fp8".
    :param save_location: where to write the JSON config; refuses to
        overwrite an existing file and returns False in that case.
    :param use_xpu: whether to prefer XPU devices when available.
    :return: the path written, or False if the file already existed.
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        # No accelerator found: fall back to single-process CPU execution.
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def default_command_parser(parser, parents):
    """Register the ``default`` subcommand (create a basic config) on ``parser``.

    ``parents`` are shared parent parsers; returns the configured subparser.
    """
    # _lowercase holds the subcommand description string defined above.
    parser = parser.add_parser("default", parents=parents, help=_lowercase, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_command_launcher)
    return parser
def default_command_launcher(args):
    """Launcher for the ``default`` subcommand: write the basic config file."""
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 625 |
"""simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Tuple ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =AutoConfig.from_pretrained(__lowerCamelCase )
lowerCamelCase__ : Any =FlaxAutoModelForSeqaSeqLM.from_config(config=__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =checkpoints.load_tax_checkpoint(__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] ='''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
if config.model_type == "t5":
lowerCamelCase__ : List[str] ='''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowerCamelCase__ : List[Any] ='''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase__ : Optional[Any] ='''TransientGlobalSelfAttention'''
else:
raise ValueError(
'''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
''' attribute with a value from [\'local\', \'transient-global].''' )
# Encoder
for layer_index in range(config.num_layers ):
lowerCamelCase__ : List[Any] =f'''layers_{str(__lowerCamelCase )}'''
# Self-Attention
lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
lowerCamelCase__ : Optional[int] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
lowerCamelCase__ : List[Any] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase__ : str =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
lowerCamelCase__ : List[Any] =tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
lowerCamelCase__ : Optional[Any] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
lowerCamelCase__ : Dict =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
lowerCamelCase__ : Tuple =tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
lowerCamelCase__ : str =flax_model.params['''encoder''']['''block'''][str(__lowerCamelCase )]['''layer''']
lowerCamelCase__ : int =tax_attention_key
lowerCamelCase__ : Optional[int] =tax_attention_out
lowerCamelCase__ : List[Any] =tax_attention_query
lowerCamelCase__ : Optional[Any] =tax_attention_value
lowerCamelCase__ : List[str] =tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase__ : Optional[int] =tax_global_layer_norm
if split_mlp_wi:
lowerCamelCase__ : Optional[int] =tax_mlp_wi_a
lowerCamelCase__ : Optional[int] =tax_mlp_wi_a
else:
lowerCamelCase__ : Union[str, Any] =tax_mlp_wi
lowerCamelCase__ : str =tax_mlp_wo
lowerCamelCase__ : Optional[Any] =tax_mlp_layer_norm
lowerCamelCase__ : Optional[int] =flax_model_encoder_layer_block
# Only for layer 0:
lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
lowerCamelCase__ : str =tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase__ : Optional[int] =tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
lowerCamelCase__ : Optional[int] =tax_encoder_global_rel_embedding
# Assigning
lowerCamelCase__ : int =tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
lowerCamelCase__ : List[Any] =tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
lowerCamelCase__ : Dict =f'''layers_{str(__lowerCamelCase )}'''
# Self-Attention
lowerCamelCase__ : Dict =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
lowerCamelCase__ : List[Any] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
lowerCamelCase__ : Optional[int] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
lowerCamelCase__ : int =tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
lowerCamelCase__ : int =tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
lowerCamelCase__ : List[Any] =tax_enc_dec_attention_module['''key''']['''kernel''']
lowerCamelCase__ : Any =tax_enc_dec_attention_module['''out''']['''kernel''']
lowerCamelCase__ : Dict =tax_enc_dec_attention_module['''query''']['''kernel''']
lowerCamelCase__ : List[str] =tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
lowerCamelCase__ : Dict =tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
lowerCamelCase__ : str =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
lowerCamelCase__ : Any =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
lowerCamelCase__ : List[Any] =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
lowerCamelCase__ : Optional[Any] =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
lowerCamelCase__ : str =tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
lowerCamelCase__ : str =flax_model.params['''decoder''']['''block'''][str(__lowerCamelCase )]['''layer''']
lowerCamelCase__ : Union[str, Any] =tax_attention_key
lowerCamelCase__ : str =tax_attention_out
lowerCamelCase__ : Optional[int] =tax_attention_query
lowerCamelCase__ : Dict =tax_attention_value
lowerCamelCase__ : List[str] =tax_pre_attention_layer_norm
lowerCamelCase__ : List[Any] =tax_enc_dec_attention_key
lowerCamelCase__ : Any =tax_enc_dec_attention_out
lowerCamelCase__ : Any =tax_enc_dec_attention_query
lowerCamelCase__ : Optional[int] =tax_enc_dec_attention_value
lowerCamelCase__ : Dict =tax_cross_layer_norm
if split_mlp_wi:
lowerCamelCase__ : Tuple =tax_mlp_wi_a
lowerCamelCase__ : int =tax_mlp_wi_a
else:
lowerCamelCase__ : List[Any] =tax_mlp_wi
lowerCamelCase__ : Dict =tax_mlp_wo
lowerCamelCase__ : Tuple =txa_mlp_layer_norm
lowerCamelCase__ : Optional[Any] =flax_model_decoder_layer_block
# Decoder Normalization
lowerCamelCase__ : Dict =tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
lowerCamelCase__ : int =txa_decoder_norm
# Only for layer 0:
lowerCamelCase__ : Tuple =tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
lowerCamelCase__ : Tuple =tax_decoder_rel_embedding
# Token Embeddings
lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''token_embedder''']['''embedding''']
lowerCamelCase__ : Dict =txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowerCamelCase__ : int =tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
flax_model.save_pretrained(__lowerCamelCase )
print('''T5X Model was sucessfully converted!''' )
if __name__ == "__main__":
_lowercase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
)
parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
parser.add_argument(
"--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
)
_lowercase : List[Any] = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 625 | 1 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_lowercase : Union[str, Any] = random.Random()
def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : str=1.0 , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[str]=None ):
"""simple docstring"""
if rng is None:
lowerCamelCase__ : Optional[Any] =global_rng
lowerCamelCase__ : List[Any] =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : str, lowerCamelCase : List[str], lowerCamelCase : str=7, lowerCamelCase : Optional[int]=400, lowerCamelCase : Any=2000, lowerCamelCase : int=10, lowerCamelCase : Optional[Any]=160, lowerCamelCase : Dict=8, lowerCamelCase : Dict=0.0, lowerCamelCase : Any=4000, lowerCamelCase : Dict=False, lowerCamelCase : List[str]=True, )-> Optional[int]:
lowerCamelCase__ : List[Any] =parent
lowerCamelCase__ : Union[str, Any] =batch_size
lowerCamelCase__ : List[str] =min_seq_length
lowerCamelCase__ : List[str] =max_seq_length
lowerCamelCase__ : Union[str, Any] =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCamelCase__ : Tuple =padding_value
lowerCamelCase__ : List[Any] =sampling_rate
lowerCamelCase__ : str =return_attention_mask
lowerCamelCase__ : Optional[Any] =do_normalize
lowerCamelCase__ : Optional[int] =feature_size
lowerCamelCase__ : str =chunk_length
lowerCamelCase__ : Optional[Any] =hop_length
def snake_case ( self : Tuple )-> Any:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case ( self : Any, lowerCamelCase : Optional[int]=False, lowerCamelCase : int=False )-> Optional[Any]:
def _flatten(lowerCamelCase : Any ):
return list(itertools.chain(*lowerCamelCase ) )
if equal_length:
lowerCamelCase__ : List[Any] =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCamelCase__ : Optional[Any] =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
lowerCamelCase__ : List[Any] =[np.asarray(lowerCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_a = WhisperFeatureExtractor if is_speech_available() else None
def snake_case ( self : List[Any] )-> Dict:
lowerCamelCase__ : Optional[int] =WhisperFeatureExtractionTester(self )
def snake_case ( self : str )-> List[str]:
lowerCamelCase__ : List[str] =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : List[Any] =feat_extract_first.save_pretrained(lowerCamelCase )[0]
check_json_file_has_correct_format(lowerCamelCase )
lowerCamelCase__ : Optional[Any] =self.feature_extraction_class.from_pretrained(lowerCamelCase )
lowerCamelCase__ : List[str] =feat_extract_first.to_dict()
lowerCamelCase__ : Union[str, Any] =feat_extract_second.to_dict()
lowerCamelCase__ : Any =feat_extract_first.mel_filters
lowerCamelCase__ : int =feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase, lowerCamelCase ) )
self.assertEqual(lowerCamelCase, lowerCamelCase )
def snake_case ( self : Union[str, Any] )-> List[Any]:
lowerCamelCase__ : str =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : Any =os.path.join(lowerCamelCase, '''feat_extract.json''' )
feat_extract_first.to_json_file(lowerCamelCase )
lowerCamelCase__ : Any =self.feature_extraction_class.from_json_file(lowerCamelCase )
lowerCamelCase__ : Optional[Any] =feat_extract_first.to_dict()
lowerCamelCase__ : Union[str, Any] =feat_extract_second.to_dict()
lowerCamelCase__ : str =feat_extract_first.mel_filters
lowerCamelCase__ : List[Any] =feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase, lowerCamelCase ) )
self.assertEqual(lowerCamelCase, lowerCamelCase )
def snake_case ( self : str )-> List[str]:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCamelCase__ : Union[str, Any] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase__ : List[Any] =[floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
lowerCamelCase__ : Dict =[np.asarray(lowerCamelCase ) for speech_input in speech_inputs]
# Test feature size
lowerCamelCase__ : List[Any] =feature_extractor(lowerCamelCase, padding='''max_length''', return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
lowerCamelCase__ : Optional[Any] =feature_extractor(speech_inputs[0], return_tensors='''np''' ).input_features
lowerCamelCase__ : List[Any] =feature_extractor(np_speech_inputs[0], return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowerCamelCase, lowerCamelCase, atol=1E-3 ) )
# Test batched
lowerCamelCase__ : Dict =feature_extractor(lowerCamelCase, return_tensors='''np''' ).input_features
lowerCamelCase__ : List[Any] =feature_extractor(lowerCamelCase, return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase, lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase, lowerCamelCase, atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase__ : Union[str, Any] =[floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCamelCase__ : int =np.asarray(lowerCamelCase )
lowerCamelCase__ : Any =feature_extractor(lowerCamelCase, return_tensors='''np''' ).input_features
lowerCamelCase__ : Optional[Any] =feature_extractor(lowerCamelCase, return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase, lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase, lowerCamelCase, atol=1E-3 ) )
# Test truncation required
lowerCamelCase__ : int =[floats_list((1, x) )[0] for x in range(200, (feature_extractor.n_samples + 500), 200 )]
lowerCamelCase__ : Any =[np.asarray(lowerCamelCase ) for speech_input in speech_inputs]
lowerCamelCase__ : List[Any] =[x[: feature_extractor.n_samples] for x in speech_inputs]
lowerCamelCase__ : Optional[int] =[np.asarray(lowerCamelCase ) for speech_input in speech_inputs_truncated]
lowerCamelCase__ : List[Any] =feature_extractor(lowerCamelCase, return_tensors='''np''' ).input_features
lowerCamelCase__ : int =feature_extractor(lowerCamelCase, return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase, lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase, lowerCamelCase, atol=1E-3 ) )
def snake_case ( self : List[Any] )-> Tuple:
import torch
lowerCamelCase__ : List[str] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase__ : Union[str, Any] =np.random.rand(100, 32 ).astype(np.floataa )
lowerCamelCase__ : Optional[Any] =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCamelCase__ : int =feature_extractor.pad([{'''input_features''': inputs}], return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
lowerCamelCase__ : Optional[int] =feature_extractor.pad([{'''input_features''': inputs}], return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def snake_case ( self : Any, lowerCamelCase : Union[str, Any] )-> Optional[int]:
lowerCamelCase__ : Tuple =load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''' )
# automatic decoding with librispeech
lowerCamelCase__ : str =ds.sort('''id''' ).select(range(lowerCamelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def snake_case ( self : Tuple )-> Dict:
# fmt: off
lowerCamelCase__ : str =torch.tensor(
[
0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
-0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
] )
# fmt: on
lowerCamelCase__ : Union[str, Any] =self._load_datasamples(1 )
lowerCamelCase__ : Optional[int] =WhisperFeatureExtractor()
lowerCamelCase__ : Any =feature_extractor(lowerCamelCase, return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape, (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30], lowerCamelCase, atol=1E-4 ) )
def snake_case ( self : Optional[Any] )-> str:
lowerCamelCase__ : Optional[Any] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase__ : Optional[int] =self._load_datasamples(1 )[0]
lowerCamelCase__ : Any =((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
lowerCamelCase__ : Union[str, Any] =feat_extract.zero_mean_unit_var_norm([audio], attention_mask=lowerCamelCase )[0]
self.assertTrue(np.all(np.mean(lowerCamelCase ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase ) - 1 ) < 1E-3 ) )
| 625 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : List[str]=13, lowerCamelCase : List[Any]=32, lowerCamelCase : Dict=3, lowerCamelCase : int=4, lowerCamelCase : str=[10, 20, 30, 40], lowerCamelCase : Any=[2, 2, 3, 2], lowerCamelCase : int=True, lowerCamelCase : int=True, lowerCamelCase : str=37, lowerCamelCase : Optional[int]="gelu", lowerCamelCase : Optional[int]=10, lowerCamelCase : Any=0.02, lowerCamelCase : Union[str, Any]=["stage2", "stage3", "stage4"], lowerCamelCase : Optional[int]=3, lowerCamelCase : Tuple=None, )-> List[str]:
lowerCamelCase__ : List[str] =parent
lowerCamelCase__ : Tuple =batch_size
lowerCamelCase__ : str =image_size
lowerCamelCase__ : Any =num_channels
lowerCamelCase__ : Tuple =num_stages
lowerCamelCase__ : List[str] =hidden_sizes
lowerCamelCase__ : Any =depths
lowerCamelCase__ : Union[str, Any] =is_training
lowerCamelCase__ : Tuple =use_labels
lowerCamelCase__ : int =intermediate_size
lowerCamelCase__ : Optional[int] =hidden_act
lowerCamelCase__ : Dict =type_sequence_label_size
lowerCamelCase__ : Tuple =initializer_range
lowerCamelCase__ : Any =out_features
lowerCamelCase__ : Tuple =num_labels
lowerCamelCase__ : Optional[int] =scope
lowerCamelCase__ : Optional[int] =num_stages
def snake_case ( self : str )-> Optional[int]:
lowerCamelCase__ : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple =None
if self.use_labels:
lowerCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : int =self.get_config()
return config, pixel_values, labels
def snake_case ( self : Union[str, Any] )-> Any:
return ConvNextConfig(
num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
def snake_case ( self : Union[str, Any] )-> Any:
return UperNetConfig(
backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=lowerCamelCase, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=lowerCamelCase, loss_ignore_index=255, num_labels=self.num_labels, )
def snake_case ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : List[Any] )-> Tuple:
lowerCamelCase__ : List[str] =UperNetForSemanticSegmentation(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowerCamelCase__ : int =model(lowerCamelCase )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def snake_case ( self : Any )-> Tuple:
lowerCamelCase__ : Dict =self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : Any =config_and_inputs
lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_a = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
_a = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
_a = False
_a = False
_a = False
_a = False
_a = False
_a = False
def snake_case ( self : Optional[int] )-> Optional[int]:
lowerCamelCase__ : Optional[Any] =UperNetModelTester(self )
lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 )
def snake_case ( self : Optional[int] )-> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self : List[str] )-> Dict:
return
def snake_case ( self : Optional[int] )-> List[str]:
lowerCamelCase__ , lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
lowerCamelCase__ : Tuple =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Tuple =[*signature.parameters.keys()]
lowerCamelCase__ : List[Any] =['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCamelCase )
def snake_case ( self : Any )-> Union[str, Any]:
lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def snake_case ( self : Optional[Any] )-> List[Any]:
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def snake_case ( self : Any )-> List[str]:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def snake_case ( self : int )-> Any:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def snake_case ( self : Dict )-> str:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def snake_case ( self : List[Any] )-> List[str]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case ( self : Tuple )-> str:
pass
def snake_case ( self : Optional[int] )-> List[str]:
def check_hidden_states_output(lowerCamelCase : Dict, lowerCamelCase : int, lowerCamelCase : List[str] ):
lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Optional[Any] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowerCamelCase__ : Optional[Any] =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase__ : List[str] =self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
lowerCamelCase__ , lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] =True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : Optional[Any] =True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def snake_case ( self : Any )-> List[Any]:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : str =_config_zero_init(lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =_config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] =model_class(config=lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def snake_case ( self : Any )-> str:
pass
@slow
def snake_case ( self : int )-> Union[str, Any]:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : str =UperNetForSemanticSegmentation.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def snake_case__ ( ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =hf_hub_download(
repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
lowerCamelCase__ : List[str] =Image.open(__lowerCamelCase ).convert('''RGB''' )
return image
@require_torch
@require_vision
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self : str )-> Union[str, Any]:
lowerCamelCase__ : List[Any] =AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
lowerCamelCase__ : List[Any] =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(lowerCamelCase )
lowerCamelCase__ : List[Any] =prepare_img()
lowerCamelCase__ : List[Any] =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
with torch.no_grad():
lowerCamelCase__ : List[Any] =model(**lowerCamelCase )
lowerCamelCase__ : Optional[int] =torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, lowerCamelCase )
lowerCamelCase__ : Dict =torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) )
def snake_case ( self : Optional[int] )-> Optional[Any]:
lowerCamelCase__ : str =AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
lowerCamelCase__ : Tuple =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(lowerCamelCase )
lowerCamelCase__ : Dict =prepare_img()
lowerCamelCase__ : Any =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
with torch.no_grad():
lowerCamelCase__ : Any =model(**lowerCamelCase )
lowerCamelCase__ : Dict =torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, lowerCamelCase )
lowerCamelCase__ : List[str] =torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) )
| 625 | 1 |
"""simple docstring"""
class __SCREAMING_SNAKE_CASE :
    """A labelled heap entry: a ``name`` (lookup key) paired with a comparable ``val``.

    Ordering is defined solely by ``val`` (see ``__lt__``), so instances can be
    stored in a min-heap; ``name`` is ignored by comparisons.
    """

    def __init__( self, name, val )-> None:
        # Fix: the original declared two parameters with the same name (a
        # SyntaxError) and never bound the instance attributes it later reads.
        self.name =name  # label used as the external lookup key
        self.val =val  # priority; the only field comparisons look at

    def __str__( self )-> str:
        # Class name is resolved dynamically so subclasses render correctly.
        return F'''{self.__class__.__name__}({self.name}, {self.val})'''

    def __lt__( self, other )-> bool:
        # Fix: the original body referenced an unbound name instead of its
        # parameter; nodes compare by value only.
        return self.val < other.val
class __SCREAMING_SNAKE_CASE :
    """Array-backed min-heap of Node-like items with an element->index map.

    NOTE(review): this class is heavily mangled. Every ``lowerCamelCase__``
    assignment below discards a value the surrounding code expects to be
    stored (e.g. ``self.heap`` / ``self.heap_dict`` / ``self.idx_of_element``
    and their entries); method bodies read names (``idx``, ``key``, ``array``,
    ``p``, ``node``, ``new_value``, ``x``) that are not the declared
    parameters; all helper methods share the single name ``snake_case`` so
    later definitions shadow earlier ones; and several defs repeat the
    parameter name ``lowerCamelCase`` (a SyntaxError). Restore the original
    identifiers before relying on any behavior documented here.
    """
    def __init__( self : Optional[int], lowerCamelCase : Optional[int] )-> Any:
        # Intended: create heap_dict / idx_of_element and build the heap from
        # the input array; all three results are lost to throwaway locals.
        lowerCamelCase__ : str ={}
        lowerCamelCase__ : List[Any] ={}
        lowerCamelCase__ : int =self.build_heap(lowerCamelCase )
    def __getitem__( self : Union[str, Any], lowerCamelCase : str )-> Optional[Any]:
        # Delegates to a ``get_value`` accessor (not defined under that name here).
        return self.get_value(lowerCamelCase )
    def snake_case ( self : Union[str, Any], lowerCamelCase : Any )-> Tuple:
        # Parent index of a 0-based heap slot.
        return (idx - 1) // 2
    def snake_case ( self : int, lowerCamelCase : List[Any] )-> str:
        # Left-child index.
        return idx * 2 + 1
    def snake_case ( self : List[str], lowerCamelCase : str )-> Tuple:
        # Right-child index.
        return idx * 2 + 2
    def snake_case ( self : Optional[Any], lowerCamelCase : int )-> str:
        # Value lookup by key in the auxiliary dictionary.
        return self.heap_dict[key]
    def snake_case ( self : Tuple, lowerCamelCase : Dict )-> List[Any]:
        # Floyd heap construction: record each node's index/value, then sift
        # down from the last parent toward the root.
        lowerCamelCase__ : List[str] =len(lowerCamelCase ) - 1
        lowerCamelCase__ : Tuple =self.get_parent_idx(lowerCamelCase )
        for idx, i in enumerate(lowerCamelCase ):
            lowerCamelCase__ : str =idx
            lowerCamelCase__ : Dict =i.val
        for i in range(lowerCamelCase, -1, -1 ):
            self.sift_down(lowerCamelCase, lowerCamelCase )
        return array
    def snake_case ( self : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Any )-> Optional[int]:
        # Sift-down: repeatedly swap with the smaller child until the heap
        # property holds at this slot.
        while True:
            lowerCamelCase__ : Tuple =self.get_left_child_idx(lowerCamelCase ) # noqa: E741
            lowerCamelCase__ : Any =self.get_right_child_idx(lowerCamelCase )
            lowerCamelCase__ : Dict =idx
            if l < len(lowerCamelCase ) and array[l] < array[idx]:
                lowerCamelCase__ : Tuple =l
            if r < len(lowerCamelCase ) and array[r] < array[smallest]:
                lowerCamelCase__ : int =r
            if smallest != idx:
                lowerCamelCase__ , lowerCamelCase__ : int =array[smallest], array[idx]
                # NOTE(review): an annotated tuple target like the construct
                # below is itself a SyntaxError; upstream this swapped the two
                # ``idx_of_element`` entries of the exchanged nodes.
                (
                    (
                        lowerCamelCase__
                    ) , (
                        lowerCamelCase__
                    ) ,
                ) : Any =(
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                lowerCamelCase__ : Dict =smallest
            else:
                break
    def snake_case ( self : List[str], lowerCamelCase : Union[str, Any] )-> List[str]:
        # Sift-up: bubble the entry toward the root while smaller than parent.
        lowerCamelCase__ : Optional[Any] =self.get_parent_idx(lowerCamelCase )
        while p >= 0 and self.heap[p] > self.heap[idx]:
            lowerCamelCase__ , lowerCamelCase__ : Any =self.heap[idx], self.heap[p]
            lowerCamelCase__ , lowerCamelCase__ : List[str] =(
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            lowerCamelCase__ : Tuple =p
            lowerCamelCase__ : List[Any] =self.get_parent_idx(lowerCamelCase )
    def snake_case ( self : Optional[Any] )-> int:
        # Smallest element (heap root) without removing it.
        return self.heap[0]
    def snake_case ( self : Dict )-> Optional[int]:
        # Pop-min: move the last element to the root, drop the removed node
        # from the index map, restore the heap property, return the node.
        lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.heap[-1], self.heap[0]
        lowerCamelCase__ , lowerCamelCase__ : int =(
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        lowerCamelCase__ : Dict =self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap )
        return x
    def snake_case ( self : int, lowerCamelCase : List[str] )-> Union[str, Any]:
        # Append a node and sift it up to its final position.
        self.heap.append(lowerCamelCase )
        lowerCamelCase__ : str =len(self.heap ) - 1
        lowerCamelCase__ : Dict =node.val
        self.sift_up(len(self.heap ) - 1 )
    def snake_case ( self : int )-> str:
        # True when the heap holds no elements.
        return len(self.heap ) == 0
    def snake_case ( self : Tuple, lowerCamelCase : Optional[Any], lowerCamelCase : Any )-> Optional[int]:
        # Lower an existing node's value, then restore the heap property.
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        lowerCamelCase__ : Any =new_value
        lowerCamelCase__ : List[str] =new_value
        self.sift_up(self.idx_of_element[node] )
# Demo of the Node/MinHeap classes defined above.
# NOTE(review): every value is assigned to the throwaway ``_lowercase`` while
# the code below reads ``r``/``b``/``a``/``x``/``e`` and ``my_min_heap``, and
# the constructors are invoked via the undefined names ``Node``/``MinHeap``
# (both classes are defined as ``__SCREAMING_SNAKE_CASE``). Restore the
# original identifiers before running this demo.
_lowercase : List[Any] = Node("R", -1)
_lowercase : Optional[int] = Node("B", 6)
_lowercase : Dict = Node("A", 3)
_lowercase : Any = Node("X", 1)
_lowercase : List[Any] = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
_lowercase : List[Any] = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -1_7)
# After
for i in my_min_heap.heap:
    print(i)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 625 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
    """Placeholder for an ONNX-backed class when the backend is unavailable.

    Instantiating it (or calling either classmethod) raises a clear error via
    ``requires_backends`` when the ``onnx`` backend is not installed.

    Fixes: the original used ``metaclass=lowerCAmelCase_`` (an undefined name;
    ``DummyObject`` is what this module imports for the purpose) and gave
    ``*args`` and ``**kwargs`` the same name, which is a SyntaxError.
    """

    # Backends this dummy stands in for.
    # NOTE(review): ``DummyObject`` conventionally reads a ``_backends``
    # attribute; this class exposes ``_a`` -- confirm against the metaclass.
    _a = ['onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['onnx'])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ['onnx'])

    @classmethod
    def snake_case(cls, *args, **kwargs):  # noqa: F811 -- duplicate name kept from original
        requires_backends(cls, ['onnx'])
| 625 | 1 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : float , ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =[redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError('''All input parameters must be positive''' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('''Relative densities cannot be greater than one''' )
else:
lowerCamelCase__ : Optional[Any] =1 - (matter_density + radiation_density + dark_energy)
lowerCamelCase__ : Any =(
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
lowerCamelCase__ : Union[str, Any] =hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation: flat universe with Omega_m = 0.3.
    # Fix: the original assigned the density to the throwaway ``_lowercase``
    # and then read the undefined names ``matter_density`` and
    # ``hubble_parameter`` -- the Hubble function defined above is
    # ``snake_case__``.
    matter_density = 0.3
    print(
        snake_case__(
            hubble_constant=68.3,
            radiation_density=1E-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 625 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def snake_case__ ( x : float , y : float , max_step : int ):
    """Return the normalized escape step (0..1) of c = x + y*i under z -> z*z + c.

    1.0 means the point never escaped within ``max_step`` iterations (treated
    as inside the Mandelbrot set); values near 0 escape immediately.
    """
    # Fix: the original repeated the parameter name ``__lowerCamelCase`` three
    # times (a SyntaxError); the body already read ``x``/``y``/``max_step``.
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def snake_case__ ( distance : float ):
    """Black-and-white coloring: black inside the set (distance == 1), white outside."""
    # Fix: the body read the undefined name ``distance`` while the parameter
    # was called ``__lowerCamelCase``.
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def snake_case__ ( distance : float ):
    """Map a normalized escape distance to an RGB color; black inside the set.

    The hue sweeps the full color wheel with ``distance`` at full saturation
    and value.
    """
    # Fix: the body read the undefined name ``distance`` while the parameter
    # was called ``__lowerCamelCase``.
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def snake_case__ ( image_width : int = 800 , image_height : int = 600 , figure_center_x : float = -0.6 , figure_center_y : float = 0 , figure_width : float = 3.2 , max_step : int = 50 , use_distance_color_coding : bool = True , ):
    """Render the Mandelbrot set into a new PIL RGB image and return it.

    NOTE(review): this calls ``get_distance`` / ``get_color_coded_rgb`` /
    ``get_black_and_white_rgb``, but every helper in this file was renamed to
    ``snake_case__`` -- those helper names must be restored for this to run.
    """
    # Fix: the original repeated ``__lowerCamelCase`` for all seven parameters
    # (a SyntaxError) and discarded the per-pixel results into throwaway
    # locals instead of writing them into the pixel buffer.
    img = Image.new('RGB', (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    # Fix: the original assigned the image to the throwaway ``_lowercase`` and
    # then read the undefined names ``get_image`` and ``img``; at module run
    # time ``snake_case__`` resolves to the last-defined (rendering) function.
    img = snake_case__()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
| 625 | 1 |
"""simple docstring"""
def snake_case__ ( graph : list[list[int]] , next_ver : int , curr_ind : int , path : list[int] ):
    """Return True iff ``next_ver`` can extend ``path`` at slot ``curr_ind``.

    The candidate must be adjacent (in ``graph``) to the previous vertex in
    ``path`` and must not already appear in ``path``.
    """
    # Fix: the original repeated the parameter name ``__lowerCamelCase`` four
    # times (a SyntaxError); the body already read these names, ordered per
    # the upstream Hamiltonian-cycle helper.
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)
def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[int] , __lowerCamelCase : int ):
    """Recursive backtracking step of the Hamiltonian-cycle search.

    NOTE(review): the parameter name ``__lowerCamelCase`` is repeated (a
    SyntaxError); the body reads ``graph``/``path``/``curr_ind`` and calls
    ``valid_connection`` / ``util_hamilton_cycle``, none of which exist under
    those names in this file (all functions were renamed ``snake_case__``).
    Both ``lowerCamelCase__`` assignments also discard what should be writes
    to ``path[curr_ind]``. Restore the original identifiers before use.
    """
    # Base Case
    if curr_ind == len(__lowerCamelCase ):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0 , len(__lowerCamelCase ) ):
        if valid_connection(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
            # Insert current vertex into path as next transition
            lowerCamelCase__ : Tuple =next_ver
            # Validate created path
            if util_hamilton_cycle(__lowerCamelCase , __lowerCamelCase , curr_ind + 1 ):
                return True
            # Backtrack
            lowerCamelCase__ : int =-1
    return False
def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : int = 0 ):
    """Search for a Hamiltonian cycle in the graph from ``start_index``.

    NOTE(review): the parameter name is duplicated (a SyntaxError), the write
    that should pin the start vertex at both ends of ``path`` is lost to a
    throwaway local, and ``util_hamilton_cycle`` is not defined under that
    name in this file. Restore the original identifiers before use.
    """
    lowerCamelCase__ : Tuple =[-1] * (len(__lowerCamelCase ) + 1)
    # initialize start and end of path with starting index
    lowerCamelCase__ : Union[str, Any] =start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(__lowerCamelCase , __lowerCamelCase , 1 ) else []
| 625 |
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def snake_case__ ( __lowerCamelCase : Optional[Any] ):
    """Build a ``VideoMAEConfig`` for the given checkpoint name.

    NOTE(review): several statements are mangled. ``set_architecture_configs``
    is not defined under that name in this file and is passed the same
    argument twice (upstream: model name + fresh config); the
    ``lowerCamelCase__`` assignments discard values that should become config
    fields (presumably ``use_mean_pooling`` / ``num_labels`` / ``id2label`` /
    ``label2id`` -- confirm against the upstream conversion script); the body
    reads the undefined names ``model_name`` and ``idalabel``; and the final
    ``return config`` reads a name this body never binds.
    """
    lowerCamelCase__ : str =VideoMAEConfig()
    set_architecture_configs(__lowerCamelCase , __lowerCamelCase )
    if "finetuned" not in model_name:
        lowerCamelCase__ : int =False
    if "finetuned" in model_name:
        lowerCamelCase__ : str ='''huggingface/label-files'''
        if "kinetics" in model_name:
            lowerCamelCase__ : List[Any] =400
            lowerCamelCase__ : Optional[int] ='''kinetics400-id2label.json'''
        elif "ssv2" in model_name:
            lowerCamelCase__ : Tuple =174
            lowerCamelCase__ : Optional[Any] ='''something-something-v2-id2label.json'''
        else:
            raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' )
        lowerCamelCase__ : Optional[int] =json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
        lowerCamelCase__ : List[Any] ={int(__lowerCamelCase ): v for k, v in idalabel.items()}
        lowerCamelCase__ : Dict =idalabel
        lowerCamelCase__ : Any ={v: k for k, v in idalabel.items()}
    return config
def snake_case__ ( model_name : str , config ):
    """Set encoder/decoder architecture sizes on ``config`` from ``model_name``.

    "base" checkpoints use the ``VideoMAEConfig`` defaults, so only the
    "small", "large" and "huge" variants are adjusted here.

    Args:
        model_name: checkpoint name containing one of small/base/large/huge.
        config: a ``VideoMAEConfig``-like object, mutated in place.

    Raises:
        ValueError: if the name contains none of small/base/large/huge.
    """
    # Fix: the original repeated the parameter name ``__lowerCamelCase``
    # (a SyntaxError) and assigned every size to a throwaway local instead of
    # a config attribute; the numeric values below are kept verbatim and the
    # attribute mapping follows the upstream VideoMAE conversion script.
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def snake_case__ ( name : str ):
    """Translate an original VideoMAE checkpoint key into its HF equivalent.

    Applies an ordered sequence of substring substitutions; order matters
    (e.g. "attn.proj" must be rewritten before the generic "attn" rules, and
    decoder keys are matched before their encoder counterparts).
    """
    # Fix: the original assigned every replacement to a throwaway local and
    # returned the input unchanged, and the parameter was not named ``name``.
    if "encoder." in name:
        name = name.replace('encoder.', '')
    if "cls_token" in name:
        name = name.replace('cls_token', 'videomae.embeddings.cls_token')
    if "decoder_pos_embed" in name:
        name = name.replace('decoder_pos_embed', 'decoder.decoder_pos_embed')
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace('pos_embed', 'videomae.embeddings.position_embeddings')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'videomae.embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'videomae.embeddings.norm')
    if "decoder.blocks" in name:
        name = name.replace('decoder.blocks', 'decoder.decoder_layers')
    if "blocks" in name:
        name = name.replace('blocks', 'videomae.encoder.layer')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name and "bias" not in name:
        name = name.replace('attn', 'attention.self')
    if "attn" in name:
        name = name.replace('attn', 'attention.attention')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "decoder_embed" in name:
        name = name.replace('decoder_embed', 'decoder.decoder_embed')
    if "decoder_norm" in name:
        name = name.replace('decoder_norm', 'decoder.decoder_norm')
    if "decoder_pred" in name:
        name = name.replace('decoder_pred', 'decoder.decoder_pred')
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace('norm.weight', 'videomae.layernorm.weight')
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace('norm.bias', 'videomae.layernorm.bias')
    if "head" in name and "decoder" not in name:
        name = name.replace('head', 'classifier')
    return name
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : int ):
    """Rewrite an original state dict into HF layout, splitting fused qkv.

    NOTE(review): the parameter name ``__lowerCamelCase`` is duplicated (a
    SyntaxError; upstream this takes the state dict and the config). The body
    reads the undefined names ``orig_state_dict`` / ``key_split`` / ``config``
    / ``dim`` / ``val``, and every ``lowerCamelCase__`` assignment discards
    what should be a rebinding of ``key``/``val``/``dim`` or a write of the
    split q/k/v slices back into the state dict (including the
    ``rename_key``-based fallback write). Restore the original identifiers
    before use.
    """
    for key in orig_state_dict.copy().keys():
        lowerCamelCase__ : Dict =orig_state_dict.pop(__lowerCamelCase )
        if key.startswith('''encoder.''' ):
            lowerCamelCase__ : Optional[int] =key.replace('''encoder.''' , '''''' )
        if "qkv" in key:
            lowerCamelCase__ : Any =key.split('''.''' )
            if key.startswith('''decoder.blocks''' ):
                # Decoder layers split by decoder_hidden_size.
                lowerCamelCase__ : Tuple =config.decoder_hidden_size
                lowerCamelCase__ : str =int(key_split[2] )
                lowerCamelCase__ : Any ='''decoder.decoder_layers.'''
                if "weight" in key:
                    lowerCamelCase__ : List[Any] =val[:dim, :]
                    lowerCamelCase__ : Any =val[dim : dim * 2, :]
                    lowerCamelCase__ : Dict =val[-dim:, :]
            else:
                # Encoder layers split by hidden_size.
                lowerCamelCase__ : Optional[Any] =config.hidden_size
                lowerCamelCase__ : Optional[Any] =int(key_split[1] )
                lowerCamelCase__ : str ='''videomae.encoder.layer.'''
                if "weight" in key:
                    lowerCamelCase__ : int =val[:dim, :]
                    lowerCamelCase__ : Tuple =val[dim : dim * 2, :]
                    lowerCamelCase__ : List[Any] =val[-dim:, :]
        else:
            lowerCamelCase__ : int =val
    return orig_state_dict
def snake_case__ ( ):
    """Download the test "eating spaghetti" clip and return its frames as a list."""
    # Fetch the .npy video fixture from the HF Hub (cached after first call).
    video_path = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video', filename='eating_spaghetti.npy', repo_type='dataset' )
    frames = np.load(video_path)
    return list(frames)
def snake_case__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ):
    """Convert an original VideoMAE checkpoint to HF format and verify logits.

    NOTE(review): this definition repeats the parameter name
    ``__lowerCamelCase`` four times (a SyntaxError); judging by the argparse
    driver at the bottom of the file, the intended parameters are
    (checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub).
    Many statements assign to throwaway ``lowerCamelCase__`` locals while
    later code reads the undefined names ``model``/``files``/``config``/
    ``image_processor``/``logits``/``model_names`` etc., and the helpers
    ``get_videomae_config`` / ``convert_state_dict`` / ``prepare_video`` only
    exist in this file under the name ``snake_case__``. De-mangle before use.
    """
    lowerCamelCase__ : str =get_videomae_config(__lowerCamelCase )
    if "finetuned" in model_name:
        lowerCamelCase__ : Tuple =VideoMAEForVideoClassification(__lowerCamelCase )
    else:
        lowerCamelCase__ : int =VideoMAEForPreTraining(__lowerCamelCase )
    # download original checkpoint, hosted on Google Drive
    lowerCamelCase__ : Union[str, Any] ='''pytorch_model.bin'''
    gdown.cached_download(__lowerCamelCase , __lowerCamelCase , quiet=__lowerCamelCase )
    lowerCamelCase__ : Optional[Any] =torch.load(__lowerCamelCase , map_location='''cpu''' )
    if "model" in files:
        lowerCamelCase__ : Dict =files['''model''']
    else:
        lowerCamelCase__ : str =files['''module''']
    lowerCamelCase__ : Optional[Any] =convert_state_dict(__lowerCamelCase , __lowerCamelCase )
    model.load_state_dict(__lowerCamelCase )
    model.eval()
    # verify model on basic input
    lowerCamelCase__ : Dict =VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
    lowerCamelCase__ : int =prepare_video()
    lowerCamelCase__ : Tuple =image_processor(__lowerCamelCase , return_tensors='''pt''' )
    if "finetuned" not in model_name:
        # pretraining checkpoints additionally need a boolean mask of patches
        lowerCamelCase__ : Tuple =hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
        lowerCamelCase__ : Union[str, Any] =torch.load(__lowerCamelCase )
    lowerCamelCase__ : int =model(**__lowerCamelCase )
    lowerCamelCase__ : Dict =outputs.logits
    lowerCamelCase__ : List[str] =[
        '''videomae-small-finetuned-kinetics''',
        '''videomae-small-finetuned-ssv2''',
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        '''videomae-base-short''',
        '''videomae-base-short-finetuned-kinetics''',
        '''videomae-base''',
        '''videomae-base-finetuned-kinetics''',
        '''videomae-large''',
        '''videomae-large-finetuned-kinetics''',
        '''videomae-huge-finetuned-kinetics''',
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        '''videomae-base-short-ssv2''',
        '''videomae-base-short-finetuned-ssv2''',
        '''videomae-base-ssv2''',
        '''videomae-base-finetuned-ssv2''',
    ]
    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    # Per-checkpoint expected logit shape and a reference slice of values.
    if model_name == "videomae-small-finetuned-kinetics":
        lowerCamelCase__ : Union[str, Any] =torch.Size([1, 400] )
        lowerCamelCase__ : str =torch.tensor([-0.92_91, -0.40_61, -0.93_07] )
    elif model_name == "videomae-small-finetuned-ssv2":
        lowerCamelCase__ : int =torch.Size([1, 174] )
        lowerCamelCase__ : Dict =torch.tensor([0.26_71, -0.46_89, -0.82_35] )
    elif model_name == "videomae-base":
        lowerCamelCase__ : List[str] =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : Dict =torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] )
    elif model_name == "videomae-base-short":
        lowerCamelCase__ : List[Any] =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : List[str] =torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] )
        # we verified the loss both for normalized and unnormalized targets for this one
        lowerCamelCase__ : str =torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] )
    elif model_name == "videomae-large":
        lowerCamelCase__ : Union[str, Any] =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : List[Any] =torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] )
    elif model_name == "videomae-large-finetuned-kinetics":
        lowerCamelCase__ : Any =torch.Size([1, 400] )
        lowerCamelCase__ : str =torch.tensor([0.07_71, 0.00_11, -0.36_25] )
    elif model_name == "videomae-huge-finetuned-kinetics":
        lowerCamelCase__ : Any =torch.Size([1, 400] )
        lowerCamelCase__ : Optional[int] =torch.tensor([0.24_33, 0.16_32, -0.48_94] )
    elif model_name == "videomae-base-short-finetuned-kinetics":
        lowerCamelCase__ : List[str] =torch.Size([1, 400] )
        lowerCamelCase__ : Dict =torch.tensor([0.65_88, 0.09_90, -0.24_93] )
    elif model_name == "videomae-base-finetuned-kinetics":
        lowerCamelCase__ : str =torch.Size([1, 400] )
        lowerCamelCase__ : Any =torch.tensor([0.36_69, -0.06_88, -0.24_21] )
    elif model_name == "videomae-base-short-ssv2":
        lowerCamelCase__ : Tuple =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : Dict =torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] )
    elif model_name == "videomae-base-short-finetuned-ssv2":
        lowerCamelCase__ : Optional[int] =torch.Size([1, 174] )
        lowerCamelCase__ : Any =torch.tensor([-0.05_37, -0.15_39, -0.32_66] )
    elif model_name == "videomae-base-ssv2":
        lowerCamelCase__ : Dict =torch.Size([1, 1408, 1536] )
        lowerCamelCase__ : str =torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] )
    elif model_name == "videomae-base-finetuned-ssv2":
        lowerCamelCase__ : str =torch.Size([1, 174] )
        lowerCamelCase__ : int =torch.tensor([0.19_61, -0.83_37, -0.63_89] )
    else:
        raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1e-4 )
    else:
        print('''Logits:''' , logits[0, :3, :3] )
        assert torch.allclose(logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 )
    print('''Logits ok!''' )
    # verify loss, if applicable
    if model_name == "videomae-base-short":
        lowerCamelCase__ : str =outputs.loss
        assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-4 )
        print('''Loss ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(__lowerCamelCase )
        model.save_pretrained(__lowerCamelCase )
    if push_to_hub:
        print('''Pushing to the hub...''' )
        model.push_to_hub(__lowerCamelCase , organization='''nielsr''' )
if __name__ == "__main__":
    # Fix: the original assigned the parser and the parsed args to the
    # throwaway ``_lowercase`` and then read the undefined names ``parser`` /
    # ``args`` / ``convert_videomae_checkpoint`` (the conversion entry point
    # in this file is named ``snake_case__``).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    snake_case__(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 625 | 1 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_lowercase : str = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase_ )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    """Visual-question-answering pipeline (image + question -> ranked answers).

    NOTE(review): this class is mangled. ``lowerCAmelCase_`` (decorator
    argument and base class) is undefined -- this module imports
    ``PIPELINE_INIT_ARGS`` and ``Pipeline``, which are presumably the
    intended values; most defs below repeat a parameter name (``*lowerCamelCase``
    with ``**lowerCamelCase``, or several positional ``lowerCamelCase``), which
    is a SyntaxError; all hook methods share the single name ``snake_case`` so
    later definitions shadow earlier ones; and several ``lowerCamelCase__``
    assignments discard what should be dict writes or renamed locals. Restore
    the original identifiers before use.
    """
    def __init__( self : Optional[int], *lowerCamelCase : Union[str, Any], **lowerCamelCase : Optional[Any] )-> Any:
        super().__init__(*lowerCamelCase, **lowerCamelCase )
        # Restricts usage to VQA-capable model classes; the argument here is
        # the duplicated varargs name, not the intended model mapping.
        self.check_model_type(lowerCamelCase )
    def snake_case ( self : Tuple, lowerCamelCase : Optional[Any]=None, lowerCamelCase : str=None, lowerCamelCase : int=None, **lowerCamelCase : Union[str, Any] )-> str:
        # Intended: route padding/truncation to preprocess and top_k to
        # postprocess; the dict writes are lost to throwaway locals.
        lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] ={}, {}
        if padding is not None:
            lowerCamelCase__ : str =padding
        if truncation is not None:
            lowerCamelCase__ : Any =truncation
        if top_k is not None:
            lowerCamelCase__ : Dict =top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self : Optional[Any], lowerCamelCase : Union["Image.Image", str], lowerCamelCase : str = None, **lowerCamelCase : Tuple )-> int:
        # Accept either (image, question) or a single mapping of both.
        if isinstance(lowerCamelCase, (Image.Image, str) ) and isinstance(lowerCamelCase, lowerCamelCase ):
            lowerCamelCase__ : List[str] ={'''image''': image, '''question''': question}
        else:
            lowerCamelCase__ : Union[str, Any] =image
        lowerCamelCase__ : int =super().__call__(lowerCamelCase, **lowerCamelCase )
        return results
    def snake_case ( self : Dict, lowerCamelCase : Optional[Any], lowerCamelCase : List[Any]=False, lowerCamelCase : Optional[Any]=False )-> int:
        # Tokenize the question and extract image features, merged into one
        # model-input dict.
        lowerCamelCase__ : Any =load_image(inputs['''image'''] )
        lowerCamelCase__ : str =self.tokenizer(
            inputs['''question'''], return_tensors=self.framework, padding=lowerCamelCase, truncation=lowerCamelCase )
        lowerCamelCase__ : Tuple =self.image_processor(images=lowerCamelCase, return_tensors=self.framework )
        model_inputs.update(lowerCamelCase )
        return model_inputs
    def snake_case ( self : List[Any], lowerCamelCase : int )-> int:
        # Forward pass through the underlying model.
        lowerCamelCase__ : Any =self.model(**lowerCamelCase )
        return model_outputs
    def snake_case ( self : Any, lowerCamelCase : List[Any], lowerCamelCase : List[Any]=5 )-> Tuple:
        # Sigmoid the logits and return the top-k (score, answer) records.
        if top_k > self.model.config.num_labels:
            lowerCamelCase__ : Tuple =self.model.config.num_labels
        if self.framework == "pt":
            lowerCamelCase__ : str =model_outputs.logits.sigmoid()[0]
            lowerCamelCase__ , lowerCamelCase__ : Tuple =probs.topk(lowerCamelCase )
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''' )
        lowerCamelCase__ : Tuple =scores.tolist()
        lowerCamelCase__ : Dict =ids.tolist()
        # NOTE(review): ``idalabel`` looks like a mangled ``id2label`` -- confirm.
        return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowerCamelCase, lowerCamelCase )]
| 625 |
"""simple docstring"""
# Fix: the three color codes were all assigned to the throwaway
# ``_lowercase``, leaving ``red``/``white``/``blue`` (and thus ``colors``)
# undefined at the tuple construction below.
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def snake_case__ ( sequence : list ):
    """Three-way partition (Dutch national flag sort) of a list of 0/1/2 values.

    Sorts ``sequence`` in place and returns it (a fresh copy is returned for
    single-element input, ``[]`` for empty input).

    Raises:
        ValueError: if the sequence contains a value other than ``colors``.
    """
    # Fix: the original read the undefined name ``sequence`` (the parameter
    # was ``__lowerCamelCase``), lost the partition indices and swaps to
    # throwaway locals, and raised the input list instead of the message.
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'''The elements inside the sequence must contains only {colors} values'''
            raise ValueError(msg)
    return sequence
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Fix: the original assigned the raw input and the parsed list to the
    # throwaway ``_lowercase`` and then read the undefined names
    # ``user_input`` / ``unsorted`` / ``dutch_national_flag_sort`` (the sort
    # function above is named ``snake_case__``).
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f'{snake_case__(unsorted)}')
| 625 | 1 |
"""simple docstring"""
from manim import *
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    """Manim animation of loading a model skeleton into CPU memory.

    NOTE(review): the base class ``lowerCAmelCase_`` is undefined --
    presumably manim's ``Scene`` (confirm). Throughout the method below,
    positional arguments are passed as the undefined name ``lowerCamelCase``
    where manim direction/color constants (``DOWN``/``RIGHT``/``YELLOW`` etc.)
    are clearly intended, so the scene cannot run until those are restored.
    """
    def snake_case ( self : str )-> Any:
        # Appears to be the Scene ``construct`` body -- confirm.
        # Build the CPU block: two columns of 6 memory cells plus a label.
        lowerCamelCase__ : Any =Rectangle(height=0.5, width=0.5 )
        lowerCamelCase__ : Optional[int] =Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 )
        lowerCamelCase__ : int =[mem.copy() for i in range(6 )]
        lowerCamelCase__ : int =[mem.copy() for i in range(6 )]
        lowerCamelCase__ : Optional[Any] =VGroup(*lowerCamelCase ).arrange(lowerCamelCase, buff=0 )
        lowerCamelCase__ : str =VGroup(*lowerCamelCase ).arrange(lowerCamelCase, buff=0 )
        lowerCamelCase__ : List[Any] =VGroup(lowerCamelCase, lowerCamelCase ).arrange(lowerCamelCase, buff=0 )
        lowerCamelCase__ : Any =Text('''CPU''', font_size=24 )
        lowerCamelCase__ : List[Any] =Group(lowerCamelCase, lowerCamelCase ).arrange(lowerCamelCase, buff=0.5, aligned_edge=lowerCamelCase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(lowerCamelCase )
        # Build the single-cell GPU block, offset left of its aligned position.
        lowerCamelCase__ : str =[mem.copy() for i in range(1 )]
        lowerCamelCase__ : List[Any] =VGroup(*lowerCamelCase ).arrange(lowerCamelCase, buff=0 )
        lowerCamelCase__ : Union[str, Any] =Text('''GPU''', font_size=24 )
        lowerCamelCase__ : Dict =Group(lowerCamelCase, lowerCamelCase ).arrange(lowerCamelCase, buff=0.5, aligned_edge=lowerCamelCase )
        gpu.align_to(lowerCamelCase, lowerCamelCase )
        gpu.set_x(gpu.get_x() - 1 )
        self.add(lowerCamelCase )
        # Build the model block (6 cells) and animate all three into view.
        lowerCamelCase__ : Dict =[mem.copy() for i in range(6 )]
        lowerCamelCase__ : str =VGroup(*lowerCamelCase ).arrange(lowerCamelCase, buff=0 )
        lowerCamelCase__ : List[str] =Text('''Model''', font_size=24 )
        lowerCamelCase__ : Tuple =Group(lowerCamelCase, lowerCamelCase ).arrange(lowerCamelCase, buff=0.5, aligned_edge=lowerCamelCase )
        model.move_to([3, -1.0, 0] )
        self.play(
            Create(lowerCamelCase, run_time=1 ), Create(lowerCamelCase, run_time=1 ), Create(lowerCamelCase, run_time=1 ), )
        # Caption and legend for the animation step.
        lowerCamelCase__ : str =MarkupText(
            F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''', font_size=24, )
        lowerCamelCase__ : Tuple =Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCamelCase__ : Union[str, Any] =MarkupText(
            F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''', font_size=18, )
        key_text.move_to([-5, 2.4, 0] )
        step_a.move_to([2, 2, 0] )
        self.play(Write(lowerCamelCase, run_time=2.5 ), Write(lowerCamelCase ), Write(lowerCamelCase ) )
        self.add(lowerCamelCase )
        # Animate each model cell being mirrored into a CPU target slot.
        lowerCamelCase__ : Optional[Any] =[]
        lowerCamelCase__ : Optional[Any] =[]
        lowerCamelCase__ : Optional[Any] =[]
        for i, rect in enumerate(lowerCamelCase ):
            lowerCamelCase__ : Dict =Rectangle(height=0.46, width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase, opacity=0.7 )
            cpu_target.move_to(lowerCamelCase )
            cpu_target.generate_target()
            lowerCamelCase__ : List[str] =0.46 / 4
            lowerCamelCase__ : Tuple =0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=lowerCamelCase )
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=lowerCamelCase, buff=0.0 )
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=lowerCamelCase, buff=0.0 )
            cpu_targs.append(lowerCamelCase )
            first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCamelCase ) )
            second_animations.append(MoveToTarget(lowerCamelCase, run_time=1.5 ) )
        self.play(*lowerCamelCase )
        self.play(*lowerCamelCase )
        self.wait()
| 625 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    '''simple docstring'''

    # Fast (CPU-sized) tests for StableUnCLIPImgaImgPipeline, built from tiny
    # randomly initialised components.
    # NOTE(review): several locals below are only ever assigned to the
    # placeholder name ``lowerCamelCase__`` while later lines read names such as
    # ``embedder_hidden_size`` / ``components`` — presumably the original
    # assignments used those names; confirm before relying on this test file.
    _a = StableUnCLIPImgaImgPipeline
    _a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    _a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    _a = frozenset(
        [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    _a = frozenset([] )
    def snake_case ( self : List[str] )-> str:
        # Assemble the dict of dummy pipeline components (image encoder,
        # noising scheduler, text encoder, UNet, VAE) used by the fast tests.
        # Each torch.manual_seed(0) pins that component's random init.
        lowerCamelCase__ : Dict =32
        lowerCamelCase__ : Optional[Any] =embedder_hidden_size
        # image encoding components
        lowerCamelCase__ : Dict =CLIPImageProcessor(crop_size=32, size=32 )
        torch.manual_seed(0 )
        lowerCamelCase__ : List[Any] =CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=lowerCamelCase, projection_dim=lowerCamelCase, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) )
        # regular denoising components
        torch.manual_seed(0 )
        lowerCamelCase__ : Optional[int] =StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
        lowerCamelCase__ : Dict =DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
        torch.manual_seed(0 )
        lowerCamelCase__ : Optional[int] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        torch.manual_seed(0 )
        lowerCamelCase__ : Tuple =CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=lowerCamelCase, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
        torch.manual_seed(0 )
        lowerCamelCase__ : Dict =UNetaDConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D'''), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='''projection''', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=lowerCamelCase, layers_per_block=1, upcast_attention=lowerCamelCase, use_linear_projection=lowerCamelCase, )
        torch.manual_seed(0 )
        lowerCamelCase__ : Union[str, Any] =DDIMScheduler(
            beta_schedule='''scaled_linear''', beta_start=0.00_085, beta_end=0.012, prediction_type='''v_prediction''', set_alpha_to_one=lowerCamelCase, steps_offset=1, )
        torch.manual_seed(0 )
        lowerCamelCase__ : Optional[int] =AutoencoderKL()
        lowerCamelCase__ : int ={
            # image encoding components
            '''feature_extractor''': feature_extractor,
            '''image_encoder''': image_encoder.eval(),
            # image noising components
            '''image_normalizer''': image_normalizer.eval(),
            '''image_noising_scheduler''': image_noising_scheduler,
            # regular denoising components
            '''tokenizer''': tokenizer,
            '''text_encoder''': text_encoder.eval(),
            '''unet''': unet.eval(),
            '''scheduler''': scheduler,
            '''vae''': vae.eval(),
        }
        return components
    def snake_case ( self : str, lowerCamelCase : Dict, lowerCamelCase : Any=0, lowerCamelCase : str=True )-> List[str]:
        # Build the pipeline-call kwargs: a seeded generator plus a random
        # 1x3x32x32 input image, optionally converted to a PIL image.
        if str(lowerCamelCase ).startswith('''mps''' ):
            lowerCamelCase__ : List[Any] =torch.manual_seed(lowerCamelCase )
        else:
            lowerCamelCase__ : Any =torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
        lowerCamelCase__ : Dict =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
        if pil_image:
            # Map [-1, 1] tensors into [0, 1] before PIL conversion.
            lowerCamelCase__ : int =input_image * 0.5 + 0.5
            lowerCamelCase__ : Dict =input_image.clamp(0, 1 )
            lowerCamelCase__ : List[str] =input_image.cpu().permute(0, 2, 3, 1 ).float().numpy()
            lowerCamelCase__ : Dict =DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def snake_case ( self : List[str] )-> Optional[Any]:
        # End-to-end smoke test on CPU with a fixed seed; the last 3x3 slice of
        # the output image is pinned against hard-coded expected values.
        lowerCamelCase__ : Dict ='''cpu''' # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase__ : str =self.get_dummy_components()
        lowerCamelCase__ : int =StableUnCLIPImgaImgPipeline(**lowerCamelCase )
        lowerCamelCase__ : Any =sd_pipe.to(lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
        lowerCamelCase__ : Dict =self.get_dummy_inputs(lowerCamelCase )
        inputs.update({'''image_embeds''': None} )
        lowerCamelCase__ : Any =sd_pipe(**lowerCamelCase ).images
        lowerCamelCase__ : List[Any] =image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        lowerCamelCase__ : Union[str, Any] =np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def snake_case ( self : int )-> Tuple:
        # Attention slicing must not change outputs; exact comparison is only
        # requested off-GPU (cpu/mps), where results are deterministic.
        lowerCamelCase__ : Tuple =torch_device in ['''cpu''', '''mps''']
        self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
    def snake_case ( self : int )-> Optional[Any]:
        # Batched inference must match single-sample inference.
        lowerCamelCase__ : List[Any] =torch_device in ['''cpu''', '''mps''']
        self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
    def snake_case ( self : List[str] )-> List[str]:
        # xFormers attention path must agree with the default attention path.
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''simple docstring'''

    # Slow GPU integration tests: run the real checkpoints and compare against
    # stored reference outputs, plus a peak-VRAM budget check.
    # NOTE(review): locals are assigned to the placeholder ``lowerCamelCase__``
    # while later lines read ``pipe`` / ``output`` / ``mem_bytes`` — the
    # original bindings were presumably those names; confirm before running.
    def snake_case ( self : List[Any] )-> Dict:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def snake_case ( self : Optional[int] )-> int:
        # img2img with the "l" (CLIP-L) checkpoint vs. a stored reference image.
        lowerCamelCase__ : Tuple =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        lowerCamelCase__ : Optional[int] =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
        lowerCamelCase__ : Optional[Any] =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-l-img2img''', torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase__ : int =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase__ : Any =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' )
        lowerCamelCase__ : List[Any] =output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )
    def snake_case ( self : Optional[int] )-> Tuple:
        # Same as above, for the "h" (CLIP-H) checkpoint.
        lowerCamelCase__ : Any =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        lowerCamelCase__ : str =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
        lowerCamelCase__ : Optional[int] =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase__ : str =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase__ : Tuple =pipe(lowerCamelCase, '''anime turle''', generator=lowerCamelCase, output_type='''np''' )
        lowerCamelCase__ : Tuple =output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )
    def snake_case ( self : Optional[int] )-> List[str]:
        # With attention slicing + sequential CPU offload enabled, peak CUDA
        # memory for a 2-step run must stay under 7 GB.
        lowerCamelCase__ : int =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        lowerCamelCase__ : Any =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
        lowerCamelCase__ : Optional[Any] =pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase__ : List[Any] =pipe(
            lowerCamelCase, '''anime turtle''', num_inference_steps=2, output_type='''np''', )
        lowerCamelCase__ : Optional[int] =torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 625 | 1 |
"""simple docstring"""
from math import factorial
_lowercase : dict[str, int] = {str(digit): factorial(digit) for digit in range(1_0)}
# Private digit->factorial table so this function is self-contained: each digit
# of `number` costs one dict lookup instead of a factorial computation.
_DIGIT_FACTORIAL: dict[str, int] = {str(_d): factorial(_d) for _d in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the decimal digits of ``number``.

    Fixes vs. the previous revision: the function is named to match its call
    site below, its parameter is actually named ``number`` (the old body read
    an undefined name), and it no longer depends on a module global that was
    bound under a different name.

    Raises:
        TypeError: if ``number`` is not an int.
        ValueError: if ``number`` is negative.
    """
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Iterate over the digits via the decimal string representation.
    return sum(_DIGIT_FACTORIAL[digit] for digit in str(number))
def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    """Project Euler 74: count starting numbers below ``number_limit`` whose
    digit-factorial chain contains exactly ``chain_length`` non-repeating terms.

    Fixes vs. the previous revision: the function is named to match the
    ``solution()`` call in the main guard, and the loop variables
    (``chain_element``, ``chain_set_length``, the memo entry) are actually
    assigned — the old body bound everything to one placeholder name.

    Raises:
        TypeError: if either parameter is not an int.
        ValueError: if either parameter is not positive.
    """
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0"
        )
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previously computed chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating
        # item, or the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        # Reuse the cached tail length when the walk hit a known element.
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
    # Run the module's doctests, then print the Project Euler 74 answer.
    import doctest

    doctest.testmod()
    # NOTE(review): ``solution`` is not defined under that name in this file as
    # shown — the function above is named ``snake_case__``; confirm the
    # intended binding before running this as a script.
    print(f'{solution()}')
| 625 |
"""simple docstring"""
def solution(n: int = 4000000) -> int:
    """Project Euler 2: return the sum of the even-valued Fibonacci terms
    that do not exceed ``n``.

    Fixes vs. the previous revision: the function is named to match the
    ``solution()`` call in the main guard, the parameter is actually named
    ``n`` (the old body compared against an undefined name), and the
    Fibonacci pair ``a, b`` is really rebound each iteration instead of being
    lost to a placeholder assignment. A running total replaces the
    intermediate list, which the old code only ever summed.
    """
    total = 0
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            total += b
        # Advance to the next Fibonacci pair.
        a, b = b, a + b
    return total
if __name__ == "__main__":
    # Print the Project Euler 2 answer for the default limit.
    # NOTE(review): ``solution`` is not defined under that name above — the
    # function is currently named ``snake_case__``; confirm before running.
    print(f'{solution() = }')
| 625 | 1 |
"""simple docstring"""
import warnings
# Deprecation shim: the implementation moved to `utils.memory`; importing this
# module only emits a warning pointing callers at the stable re-export.
# Fix: the message previously said `find_executable_batchsize`, which does not
# match the actual symbol name used in the import example on the next line.
warnings.warn(
    "memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: "
    "`from accelerate import find_executable_batch_size` to avoid this warning.",
    FutureWarning,
)
| 625 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class __SCREAMING_SNAKE_CASE :
    '''simple docstring'''

    # Builds tiny BlenderbotSmall configs and matching TF inputs for the fast
    # model tests.
    # NOTE(review): __init__ assigns every argument to the placeholder
    # ``lowerCamelCase__`` while later code reads ``self.batch_size`` etc. —
    # presumably each was meant to be an attribute assignment; confirm.
    _a = BlenderbotSmallConfig
    _a = {}
    _a = 'gelu'
    def __init__( self : Union[str, Any], lowerCamelCase : List[str], lowerCamelCase : Dict=13, lowerCamelCase : Optional[Any]=7, lowerCamelCase : Optional[int]=True, lowerCamelCase : int=False, lowerCamelCase : Union[str, Any]=99, lowerCamelCase : str=32, lowerCamelCase : List[Any]=2, lowerCamelCase : Optional[int]=4, lowerCamelCase : Union[str, Any]=37, lowerCamelCase : str=0.1, lowerCamelCase : Optional[int]=0.1, lowerCamelCase : Optional[Any]=20, lowerCamelCase : int=2, lowerCamelCase : Any=1, lowerCamelCase : Optional[Any]=0, )-> List[str]:
        # Store the (tiny) hyper-parameters used to build configs and inputs.
        lowerCamelCase__ : Any =parent
        lowerCamelCase__ : Dict =batch_size
        lowerCamelCase__ : Optional[int] =seq_length
        lowerCamelCase__ : Tuple =is_training
        lowerCamelCase__ : Dict =use_labels
        lowerCamelCase__ : List[Any] =vocab_size
        lowerCamelCase__ : str =hidden_size
        lowerCamelCase__ : str =num_hidden_layers
        lowerCamelCase__ : Union[str, Any] =num_attention_heads
        lowerCamelCase__ : Any =intermediate_size
        lowerCamelCase__ : Dict =hidden_dropout_prob
        lowerCamelCase__ : List[Any] =attention_probs_dropout_prob
        lowerCamelCase__ : str =max_position_embeddings
        lowerCamelCase__ : Optional[int] =eos_token_id
        lowerCamelCase__ : str =pad_token_id
        lowerCamelCase__ : Union[str, Any] =bos_token_id
    def snake_case ( self : Any )-> Any:
        # Random input ids ending in EOS, a matching decoder input, a tiny
        # config, and the standard inputs dict.
        lowerCamelCase__ : Any =ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
        lowerCamelCase__ : Tuple =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
        lowerCamelCase__ : Any =tf.concat([input_ids, eos_tensor], axis=1 )
        lowerCamelCase__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        lowerCamelCase__ : int =self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        lowerCamelCase__ : Optional[int] =prepare_blenderbot_small_inputs_dict(lowerCamelCase, lowerCamelCase, lowerCamelCase )
        return config, inputs_dict
    def snake_case ( self : Any, lowerCamelCase : str, lowerCamelCase : Any )-> Optional[Any]:
        # Verify cached decoding (past_key_values) matches uncached decoding on
        # a random slice of the decoder outputs.
        lowerCamelCase__ : Union[str, Any] =TFBlenderbotSmallModel(config=lowerCamelCase ).get_decoder()
        lowerCamelCase__ : List[Any] =inputs_dict['''input_ids''']
        lowerCamelCase__ : Optional[int] =input_ids[:1, :]
        lowerCamelCase__ : str =inputs_dict['''attention_mask'''][:1, :]
        lowerCamelCase__ : Union[str, Any] =inputs_dict['''head_mask''']
        lowerCamelCase__ : Optional[Any] =1
        # first forward pass
        lowerCamelCase__ : Dict =model(lowerCamelCase, attention_mask=lowerCamelCase, head_mask=lowerCamelCase, use_cache=lowerCamelCase )
        lowerCamelCase__ , lowerCamelCase__ : List[str] =outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        lowerCamelCase__ : Union[str, Any] =ids_tensor((self.batch_size, 3), config.vocab_size )
        lowerCamelCase__ : Tuple =tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
        # append to next input_ids and
        lowerCamelCase__ : List[str] =tf.concat([input_ids, next_tokens], axis=-1 )
        lowerCamelCase__ : str =tf.concat([attention_mask, next_attn_mask], axis=-1 )
        lowerCamelCase__ : Optional[int] =model(lowerCamelCase, attention_mask=lowerCamelCase )[0]
        lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase, attention_mask=lowerCamelCase, past_key_values=lowerCamelCase )[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
        # select random slice
        lowerCamelCase__ : Tuple =int(ids_tensor((1,), output_from_past.shape[-1] ) )
        lowerCamelCase__ : int =output_from_no_past[:, -3:, random_slice_idx]
        lowerCamelCase__ : List[str] =output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(lowerCamelCase, lowerCamelCase, rtol=1E-3 )
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the standard kwargs dict for BlenderbotSmall TF model calls.

    Any mask the caller does not supply is derived from the ids / config.
    Fixes vs. the previous revision: the function is named to match its call
    site in the model tester, the parameters have distinct names (the old
    signature repeated one placeholder name, a SyntaxError), and the masks use
    ``tf.int8`` instead of the nonexistent ``tf.inta``.
    """
    if attention_mask is None:
        # Attend to every non-pad encoder token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # Always attend to the forced first decoder position, then mask pads.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    '''simple docstring'''

    # Common TF model tests for BlenderbotSmall: config round-trips and the
    # cached-decoding equivalence check from the model tester above.
    _a = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    _a = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    _a = (
        {
            'conversational': TFBlenderbotSmallForConditionalGeneration,
            'feature-extraction': TFBlenderbotSmallModel,
            'summarization': TFBlenderbotSmallForConditionalGeneration,
            'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
            'translation': TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    _a = True
    _a = False
    _a = False
    def snake_case ( self : Any )-> str:
        # Set up the shared model tester and the generic config tester.
        lowerCamelCase__ : Tuple =TFBlenderbotSmallModelTester(self )
        lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase )
    def snake_case ( self : Any )-> Optional[int]:
        # Exercise the generic config serialization/round-trip checks.
        self.config_tester.run_common_tests()
    def snake_case ( self : int )-> str:
        # Cached (past_key_values) decoding must match full decoding.
        lowerCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase )
@require_tokenizers
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''simple docstring'''

    # Slow integration test: generate a reply with the real 90M checkpoint and
    # accept any of a small set of known-good outputs.
    _a = [
        'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
        ' i\'m going to throw up.\nand why is that?'
    ]
    _a = 'facebook/blenderbot_small-90M'
    @cached_property
    def snake_case ( self : Any )-> List[Any]:
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
    @cached_property
    def snake_case ( self : int )-> List[Any]:
        # Lazily load the seq2seq model once per test-class instance.
        lowerCamelCase__ : str =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    @slow
    def snake_case ( self : Tuple )-> int:
        # Beam-search generation must decode to one of the expected replies.
        lowerCamelCase__ : Dict =self.tokenizer(self.src_text, return_tensors='''tf''' )
        lowerCamelCase__ : Any =self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=lowerCamelCase, )
        lowerCamelCase__ : Any =self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=lowerCamelCase )[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
)
| 625 | 1 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
    '''simple docstring'''

    # Builds tiny ConvNext-backboned UperNet configs and random inputs for the
    # fast semantic-segmentation tests.
    # NOTE(review): __init__ binds every argument to the placeholder
    # ``lowerCamelCase__`` while later code reads ``self.batch_size`` etc. —
    # presumably each line was an attribute assignment; confirm before use.
    def __init__( self : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : List[str]=13, lowerCamelCase : List[Any]=32, lowerCamelCase : Dict=3, lowerCamelCase : int=4, lowerCamelCase : str=[10, 20, 30, 40], lowerCamelCase : Any=[2, 2, 3, 2], lowerCamelCase : int=True, lowerCamelCase : int=True, lowerCamelCase : str=37, lowerCamelCase : Optional[int]="gelu", lowerCamelCase : Optional[int]=10, lowerCamelCase : Any=0.02, lowerCamelCase : Union[str, Any]=["stage2", "stage3", "stage4"], lowerCamelCase : Optional[int]=3, lowerCamelCase : Tuple=None, )-> List[str]:
        # Store the (tiny) hyper-parameters for config/input construction.
        lowerCamelCase__ : List[str] =parent
        lowerCamelCase__ : Tuple =batch_size
        lowerCamelCase__ : str =image_size
        lowerCamelCase__ : Any =num_channels
        lowerCamelCase__ : Tuple =num_stages
        lowerCamelCase__ : List[str] =hidden_sizes
        lowerCamelCase__ : Any =depths
        lowerCamelCase__ : Union[str, Any] =is_training
        lowerCamelCase__ : Tuple =use_labels
        lowerCamelCase__ : int =intermediate_size
        lowerCamelCase__ : Optional[int] =hidden_act
        lowerCamelCase__ : Dict =type_sequence_label_size
        lowerCamelCase__ : Tuple =initializer_range
        lowerCamelCase__ : Any =out_features
        lowerCamelCase__ : Tuple =num_labels
        lowerCamelCase__ : Optional[int] =scope
        lowerCamelCase__ : Optional[int] =num_stages
    def snake_case ( self : str )-> Optional[int]:
        # Random pixel values (and labels when requested) plus a config.
        lowerCamelCase__ : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase__ : Tuple =None
        if self.use_labels:
            lowerCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size], self.type_sequence_label_size )
        lowerCamelCase__ : int =self.get_config()
        return config, pixel_values, labels
    def snake_case ( self : Union[str, Any] )-> Any:
        # Tiny ConvNext backbone config.
        return ConvNextConfig(
            num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
    def snake_case ( self : Union[str, Any] )-> Any:
        # UperNet config wrapping the tiny backbone config above.
        return UperNetConfig(
            backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=lowerCamelCase, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=lowerCamelCase, loss_ignore_index=255, num_labels=self.num_labels, )
    def snake_case ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : List[Any] )-> Tuple:
        # Forward pass must yield per-pixel logits of the expected shape.
        lowerCamelCase__ : List[str] =UperNetForSemanticSegmentation(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        lowerCamelCase__ : int =model(lowerCamelCase )
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def snake_case ( self : Any )-> Tuple:
        # Split prepared inputs into (config, inputs_dict) for the common tests.
        lowerCamelCase__ : Dict =self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) : Any =config_and_inputs
        lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    '''simple docstring'''

    # Common fast tests for UperNetForSemanticSegmentation; many generic checks
    # are skipped because UperNet has no base model / input embeddings.
    _a = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    _a = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
    _a = False
    _a = False
    _a = False
    _a = False
    _a = False
    _a = False
    def snake_case ( self : Optional[int] )-> Optional[int]:
        # Set up the shared model tester and config tester.
        lowerCamelCase__ : Optional[Any] =UperNetModelTester(self )
        lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 )
    def snake_case ( self : Optional[int] )-> Optional[int]:
        # Run the individual config serialization/round-trip checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def snake_case ( self : List[str] )-> Dict:
        # Intentionally a no-op (placeholder for common-properties check).
        return
    def snake_case ( self : Optional[int] )-> List[str]:
        # forward() must take ``pixel_values`` as its first argument.
        lowerCamelCase__ , lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
            lowerCamelCase__ : Tuple =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Tuple =[*signature.parameters.keys()]
            lowerCamelCase__ : List[Any] =['''pixel_values''']
            self.assertListEqual(arg_names[:1], lowerCamelCase )
    def snake_case ( self : Any )-> Union[str, Any]:
        # Output-shape check delegated to the model tester.
        lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase )
    @unittest.skip(reason='''UperNet does not use inputs_embeds''' )
    def snake_case ( self : Optional[Any] )-> List[Any]:
        pass
    @unittest.skip(reason='''UperNet does not support input and output embeddings''' )
    def snake_case ( self : Any )-> List[str]:
        pass
    @unittest.skip(reason='''UperNet does not have a base model''' )
    def snake_case ( self : int )-> Any:
        pass
    @unittest.skip(reason='''UperNet does not have a base model''' )
    def snake_case ( self : Dict )-> str:
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def snake_case ( self : List[Any] )-> List[str]:
        pass
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def snake_case ( self : Tuple )-> str:
        pass
    def snake_case ( self : Optional[int] )-> List[str]:
        # Hidden states must be exposed per backbone stage with the expected
        # spatial size, both via call kwargs and via the config flag.
        def check_hidden_states_output(lowerCamelCase : Dict, lowerCamelCase : int, lowerCamelCase : List[str] ):
            lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
            model.to(lowerCamelCase )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : Optional[Any] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
            lowerCamelCase__ : Optional[Any] =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowerCamelCase__ : List[str] =self.model_tester.num_stages
            self.assertEqual(len(lowerCamelCase ), expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        lowerCamelCase__ , lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Optional[int] =True
            check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__ : Optional[Any] =True
            check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
    def snake_case ( self : Any )-> List[Any]:
        # With zeroed initializer ranges, every trainable parameter must be
        # exactly 0 or 1 (i.e. properly initialized).
        lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : str =_config_zero_init(lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] =_config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Optional[int] =model_class(config=lowerCamelCase )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
    @unittest.skip(reason='''UperNet does not have tied weights''' )
    def snake_case ( self : Any )-> str:
        pass
    @slow
    def snake_case ( self : int )-> Union[str, Any]:
        # Pretrained checkpoints must load without error.
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : str =UperNetForSemanticSegmentation.from_pretrained(lowerCamelCase )
            self.assertIsNotNone(lowerCamelCase )
def prepare_img():
    """Download the ADE20K validation image used by the integration tests and
    return it as an RGB PIL image.

    Fixes vs. the previous revision: the function is named to match its
    ``prepare_img()`` call sites in the integration test class, and the
    downloaded path / opened image are actually bound to the names that are
    used (the old body assigned them to a placeholder).
    """
    filepath = hf_hub_download(
        repo_id='hf-internal-testing/fixtures_ade20k', repo_type='dataset', filename='ADE_val_00000001.jpg'
    )
    image = Image.open(filepath).convert('RGB')
    return image
@require_torch
@require_vision
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''simple docstring'''

    # Slow integration tests: run the real Swin/ConvNext UperNet checkpoints on
    # one ADE20K image and pin a 3x3 logits slice against hard-coded values.
    def snake_case ( self : str )-> Union[str, Any]:
        # Swin-tiny backbone checkpoint.
        lowerCamelCase__ : List[Any] =AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
        lowerCamelCase__ : List[Any] =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(lowerCamelCase )
        lowerCamelCase__ : List[Any] =prepare_img()
        lowerCamelCase__ : List[Any] =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
        with torch.no_grad():
            lowerCamelCase__ : List[Any] =model(**lowerCamelCase )
        lowerCamelCase__ : Optional[int] =torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape, lowerCamelCase )
        lowerCamelCase__ : Dict =torch.tensor(
            [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) )
    def snake_case ( self : Optional[int] )-> Optional[Any]:
        # ConvNext-tiny backbone checkpoint.
        lowerCamelCase__ : str =AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
        lowerCamelCase__ : Tuple =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(lowerCamelCase )
        lowerCamelCase__ : Dict =prepare_img()
        lowerCamelCase__ : Any =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
        with torch.no_grad():
            lowerCamelCase__ : Any =model(**lowerCamelCase )
        lowerCamelCase__ : Dict =torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape, lowerCamelCase )
        lowerCamelCase__ : List[str] =torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) )
| 625 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : list[int] ):
"""simple docstring"""
# 1. Validate that path exists between current and next vertices
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[int] , __lowerCamelCase : int ):
"""simple docstring"""
# Base Case
if curr_ind == len(__lowerCamelCase ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(__lowerCamelCase ) ):
if valid_connection(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
# Insert current vertex into path as next transition
lowerCamelCase__ : Tuple =next_ver
# Validate created path
if util_hamilton_cycle(__lowerCamelCase , __lowerCamelCase , curr_ind + 1 ):
return True
# Backtrack
lowerCamelCase__ : int =-1
return False
def snake_case__ ( graph : list[list[int]] , start_index : int = 0 ) -> list[int]:
    """Return a Hamiltonian cycle starting/ending at ``start_index``, or [].

    NOTE(review): the original signature repeated ``__lowerCamelCase`` (a
    SyntaxError) and the ``path[0] = path[-1] = start_index`` initialisation
    had been reduced to a local rebind; both restored. The recursive helper is
    referenced as ``util_hamilton_cycle`` exactly as the original body did —
    verify module naming (the def above is ``snake_case__``).
    """
    # Path holds len(graph)+1 slots: the cycle returns to the start vertex.
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
| 625 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __SCREAMING_SNAKE_CASE ( PipelineTool ):
    """Tool answering an English question about an image with ViLT VQA.

    NOTE(review): restored from an obfuscated form in which every class
    attribute was named ``_a`` (each rebinding the previous one), all three
    methods were named ``snake_case`` (shadowing each other), ``encode`` had
    duplicate parameter names (a SyntaxError), the base class ``lowerCAmelCase_``
    was undefined (``PipelineTool`` is what this module imports), and
    ``id2label`` had been mangled to ``idalabel``.
    """

    default_checkpoint = 'dandelin/vilt-b32-finetuned-vqa'
    description = (
        'This is a tool that answers a question about an image. It takes an input named `image` which should be the '
        'image containing the information, as well as a `question` which should be the question in English. It '
        'returns a text that is the answer to the question.'
    )
    name = 'image_qa'
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ['image', 'text']
    outputs = ['text']

    def __init__( self, *args, **kwargs ):
        # Vision backend must be available before the base tool initialises.
        requires_backends(self, ['vision'])
        super().__init__(*args, **kwargs)

    def encode( self, image: "Image", question: str ):
        """Tokenize/featurize the (image, question) pair for the model."""
        return self.pre_processor(image, question, return_tensors='pt')

    def forward( self, inputs ):
        """Run the VQA model; inference only, so no gradients are tracked."""
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode( self, outputs ):
        """Map the highest-scoring logit back to its answer label."""
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 625 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece fixture used by the tokenizer tests below.
_lowercase : List[str] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right
# Fairseq language-code ids read by the test classes below (they reference
# EN_CODE / RO_CODE, which the obfuscation had rebound to `_lowercase`).
EN_CODE = 2_5_0_0_0_4
RO_CODE = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
    '''simple docstring'''
    # NOTE(review): all methods are named `snake_case` (later defs shadow earlier
    # ones) and locals are repeatedly rebound to `lowerCamelCase__` while the bodies
    # read the original names (e.g. `tokenizer`, `tokenizer_r_files`) — verify
    # against the upstream MBart tokenization test before relying on this class.
    _a = MBartTokenizer
    _a = MBartTokenizerFast
    _a = True
    _a = True
    def snake_case ( self : Tuple )-> Union[str, Any]:
        # Builds a tokenizer from the SentencePiece fixture and caches it in tmpdir.
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCamelCase__ : Union[str, Any] =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )
        tokenizer.save_pretrained(self.tmpdirname )
    def snake_case ( self : Dict )-> Union[str, Any]:
        # Tokenize / id round-trips against known fairseq-offset values.
        lowerCamelCase__ : Any =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )
        lowerCamelCase__ : List[Any] =tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(lowerCamelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowerCamelCase ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        lowerCamelCase__ : str =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            lowerCamelCase, [
            SPIECE_UNDERLINE + '''I''',
            SPIECE_UNDERLINE + '''was''',
            SPIECE_UNDERLINE + '''b''',
            '''or''',
            '''n''',
            SPIECE_UNDERLINE + '''in''',
            SPIECE_UNDERLINE + '''''',
            '''9''',
            '''2''',
            '''0''',
            '''0''',
            '''0''',
            ''',''',
            SPIECE_UNDERLINE + '''and''',
            SPIECE_UNDERLINE + '''this''',
            SPIECE_UNDERLINE + '''is''',
            SPIECE_UNDERLINE + '''f''',
            '''al''',
            '''s''',
            '''é''',
            '''.''',
            ], )
        lowerCamelCase__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(lowerCamelCase )
        self.assertListEqual(
            lowerCamelCase, [
            value + tokenizer.fairseq_offset
            for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ], )
        lowerCamelCase__ : str =tokenizer.convert_ids_to_tokens(lowerCamelCase )
        self.assertListEqual(
            lowerCamelCase, [
            SPIECE_UNDERLINE + '''I''',
            SPIECE_UNDERLINE + '''was''',
            SPIECE_UNDERLINE + '''b''',
            '''or''',
            '''n''',
            SPIECE_UNDERLINE + '''in''',
            SPIECE_UNDERLINE + '''''',
            '''<unk>''',
            '''2''',
            '''0''',
            '''0''',
            '''0''',
            ''',''',
            SPIECE_UNDERLINE + '''and''',
            SPIECE_UNDERLINE + '''this''',
            SPIECE_UNDERLINE + '''is''',
            SPIECE_UNDERLINE + '''f''',
            '''al''',
            '''s''',
            '''<unk>''',
            '''.''',
            ], )
    def snake_case ( self : Tuple )-> List[Any]:
        # Compares slow vs fast tokenizer save/load in all three legacy formats.
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        lowerCamelCase__ : int =(self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCamelCase__ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
                lowerCamelCase__ : str =self.tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
                lowerCamelCase__ : List[str] =tempfile.mkdtemp()
                lowerCamelCase__ : Union[str, Any] =tokenizer_r.save_pretrained(lowerCamelCase )
                lowerCamelCase__ : Optional[int] =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                lowerCamelCase__ : List[str] =tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Any =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : Dict =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(lowerCamelCase )
                # Save tokenizer rust, legacy_format=True
                lowerCamelCase__ : Dict =tempfile.mkdtemp()
                lowerCamelCase__ : List[str] =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
                lowerCamelCase__ : Tuple =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it save with the same files
                self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Optional[int] =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : Any =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                shutil.rmtree(lowerCamelCase )
                # Save tokenizer rust, legacy_format=False
                lowerCamelCase__ : Optional[int] =tempfile.mkdtemp()
                lowerCamelCase__ : int =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
                lowerCamelCase__ : Dict =tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                lowerCamelCase__ : Dict =tokenizer_r.from_pretrained(lowerCamelCase )
                lowerCamelCase__ : int =tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
                shutil.rmtree(lowerCamelCase )
shutil.rmtree(lowerCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''simple docstring'''
    # NOTE(review): the bodies read EN_CODE / RO_CODE (bound to `_lowercase` at module
    # level in this file), all methods share the name `snake_case`, and locals are
    # rebound to `lowerCamelCase__` while later lines read the original names
    # (`src_text`, `ids`, `batch`, `new_tok`, `targets`) — verify against upstream.
    _a = 'facebook/mbart-large-en-ro'
    _a = [
    ' UN Chief Says There Is No Military Solution in Syria',
    ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    _a = [
    'Şeful ONU declară că nu există o soluţie militară în Siria',
    'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
    ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
    ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    _a = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
    @classmethod
    def snake_case ( cls : List[Any] )-> Optional[int]:
        # Downloads the en_XX -> ro_RO tokenizer once for the whole test class.
        lowerCamelCase__ : MBartTokenizer =MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='''en_XX''', tgt_lang='''ro_RO''' )
        lowerCamelCase__ : Optional[int] =1
        return cls
    def snake_case ( self : Optional[Any] )-> List[str]:
        # Language-code tokens map to their fixed fairseq ids.
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''], 25_0001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''], 25_0004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''], 25_0020 )
    def snake_case ( self : Optional[int] )-> List[Any]:
        lowerCamelCase__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, lowerCamelCase )
    def snake_case ( self : Optional[Any] )-> str:
        self.assertIn(lowerCamelCase, self.tokenizer.all_special_ids )
        lowerCamelCase__ : Optional[int] =[RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
        lowerCamelCase__ : Any =self.tokenizer.decode(lowerCamelCase, skip_special_tokens=lowerCamelCase )
        lowerCamelCase__ : str =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCamelCase )
        self.assertEqual(lowerCamelCase, lowerCamelCase )
        self.assertNotIn(self.tokenizer.eos_token, lowerCamelCase )
    def snake_case ( self : Tuple )-> int:
        # Truncation keeps EOS + language code as the last two ids.
        lowerCamelCase__ : Optional[int] =['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0], lowerCamelCase )
        lowerCamelCase__ : Dict =10
        lowerCamelCase__ : Optional[int] =self.tokenizer(lowerCamelCase, max_length=lowerCamelCase, truncation=lowerCamelCase ).input_ids[0]
        self.assertEqual(ids[-2], 2 )
        self.assertEqual(ids[-1], lowerCamelCase )
        self.assertEqual(len(lowerCamelCase ), lowerCamelCase )
    def snake_case ( self : int )-> Any:
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ), [25_0026, 25_0001] )
    def snake_case ( self : Tuple )-> Optional[Any]:
        # Save/reload round-trip keeps the fairseq vocab mapping intact.
        lowerCamelCase__ : int =tempfile.mkdtemp()
        lowerCamelCase__ : Optional[int] =self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =MBartTokenizer.from_pretrained(lowerCamelCase )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowerCamelCase )
    @require_torch
    def snake_case ( self : Optional[Any] )-> Tuple:
        lowerCamelCase__ : Optional[Any] =self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, return_tensors='''pt''' )
        lowerCamelCase__ : Dict =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
    @require_torch
    def snake_case ( self : Optional[Any] )-> Any:
        lowerCamelCase__ : str =self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=len(self.expected_src_tokens ), return_tensors='''pt''', )
        lowerCamelCase__ : List[Any] =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )
        self.assertIsInstance(lowerCamelCase, lowerCamelCase )
        self.assertEqual((2, 14), batch.input_ids.shape )
        self.assertEqual((2, 14), batch.attention_mask.shape )
        lowerCamelCase__ : Any =batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, lowerCamelCase )
        self.assertEqual(2, batch.decoder_input_ids[0, -1] ) # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [] )
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE] )
    def snake_case ( self : List[Any] )-> Dict:
        # Source and target can be padded/truncated to different lengths.
        lowerCamelCase__ : Any =self.tokenizer(self.src_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=3, return_tensors='''pt''' )
        lowerCamelCase__ : Tuple =self.tokenizer(
            text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=10, return_tensors='''pt''' )
        lowerCamelCase__ : Union[str, Any] =targets['''input_ids''']
        lowerCamelCase__ : List[Any] =shift_tokens_right(lowerCamelCase, self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1], 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1], 10 )
    @require_torch
    def snake_case ( self : Optional[int] )-> List[Any]:
        lowerCamelCase__ : str =self.tokenizer._build_translation_inputs(
            '''A test''', return_tensors='''pt''', src_lang='''en_XX''', tgt_lang='''ar_AR''' )
        self.assertEqual(
            nested_simplify(lowerCamelCase ), {
            # A, test, EOS, en_XX
            '''input_ids''': [[62, 3034, 2, 25_0004]],
            '''attention_mask''': [[1, 1, 1, 1]],
            # ar_AR
            '''forced_bos_token_id''': 25_0001,
            }, )
| 625 | 1 |
"""simple docstring"""
import os
def snake_case__ ( grid : list[list[int]] ) -> int:
    """Return the largest product of four adjacent numbers in a square grid.

    Adjacency is checked vertically, horizontally and along both diagonals
    (Project Euler problem 11). NOTE(review): like the original, this scan
    assumes a square n x n grid — verify if rectangular input is possible.

    The original body iterated ``range(grid)`` and took ``max`` of the grid
    itself (all four products had been rebound to a single name); restored.
    """
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            lr_diag_product = 0
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            rl_diag_product = 0
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            largest = max(largest, max_product)
    return largest
def snake_case__ ( ) -> int:
    """Read the grid from ``grid.txt`` next to this file and return the
    largest product of four adjacent numbers in it.

    NOTE(review): restored ``grid`` bindings and ``__file__`` (the original read
    ``os.path.dirname(__lowerCamelCase)`` with no such name). ``largest_product``
    is the intended name of the scanner defined above, but that def is named
    ``snake_case__`` in this module — verify module naming.
    """
    grid = []
    with open(os.path.dirname(__file__) + '''/grid.txt''') as file:
        for line in file:
            grid.append(line.strip('''\n''').split(''' '''))
    # Convert the whitespace-separated text rows into rows of ints.
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)


if __name__ == "__main__":
    # NOTE(review): the original guard called undefined `solution`; call the def above.
    print(snake_case__())
| 625 |
"""simple docstring"""
def snake_case__ ( sentence : str ) -> str:
    """Reverse every word of more than four characters in ``sentence``.

    >>> snake_case__("Hey wollef sroirraw")
    'Hey fellow warriors'

    NOTE(review): the original tested ``len(<whole sentence>) > 4`` and its body
    read ``sentence`` while the parameter was named ``__lowerCamelCase``; fixed
    to test each word's length, matching the demo below.
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the original guard called undefined `reverse_long_words`.
    print(snake_case__("Hey wollef sroirraw"))
| 625 | 1 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    '''simple docstring'''
    # NOTE(review): locals are rebound to `lowerCamelCase__` while the bodies read the
    # original names (`config`, `scheduler`, `model`, `sample`, `residual`, ...), and
    # all methods share the name `snake_case` — verify against the upstream
    # UnCLIPScheduler test before relying on this class.
    _a = (UnCLIPScheduler,)
    def snake_case ( self : Tuple, **lowerCamelCase : Union[str, Any] )-> int:
        # Default scheduler config; keyword overrides are merged in.
        lowerCamelCase__ : Tuple ={
            '''num_train_timesteps''': 1000,
            '''variance_type''': '''fixed_small_log''',
            '''clip_sample''': True,
            '''clip_sample_range''': 1.0,
            '''prediction_type''': '''epsilon''',
        }
        config.update(**lowerCamelCase )
        return config
    def snake_case ( self : Optional[Any] )-> List[str]:
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=lowerCamelCase )
    def snake_case ( self : str )-> Tuple:
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=lowerCamelCase )
    def snake_case ( self : List[Any] )-> Dict:
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=lowerCamelCase )
    def snake_case ( self : Any )-> Union[str, Any]:
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=lowerCamelCase )
    def snake_case ( self : int )-> int:
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=lowerCamelCase )
    def snake_case ( self : List[str] )-> Optional[Any]:
        # prev_timestep must precede time_step; invalid pairs are skipped.
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=lowerCamelCase, prev_timestep=lowerCamelCase )
    def snake_case ( self : Optional[Any] )-> List[str]:
        # Spot-checks of the fixed_small_log variance schedule.
        lowerCamelCase__ : Dict =self.scheduler_classes[0]
        lowerCamelCase__ : Tuple =self.get_scheduler_config(variance_type='''fixed_small_log''' )
        lowerCamelCase__ : Optional[Any] =scheduler_class(**lowerCamelCase )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1E-5
    def snake_case ( self : Union[str, Any] )-> Union[str, Any]:
        # Spot-checks of the learned_range variance with a fixed predicted variance.
        lowerCamelCase__ : Tuple =self.scheduler_classes[0]
        lowerCamelCase__ : Optional[int] =self.get_scheduler_config(variance_type='''learned_range''' )
        lowerCamelCase__ : Optional[int] =scheduler_class(**lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] =0.5
        assert scheduler._get_variance(1, predicted_variance=lowerCamelCase ) - -10.1_712_790 < 1E-5
        assert scheduler._get_variance(487, predicted_variance=lowerCamelCase ) - -5.7_998_052 < 1E-5
        assert scheduler._get_variance(999, predicted_variance=lowerCamelCase ) - -0.0_010_011 < 1E-5
    def snake_case ( self : int )-> List[Any]:
        # Full denoising loop over the default (train) timesteps.
        lowerCamelCase__ : Tuple =self.scheduler_classes[0]
        lowerCamelCase__ : int =self.get_scheduler_config()
        lowerCamelCase__ : int =scheduler_class(**lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] =scheduler.timesteps
        lowerCamelCase__ : List[str] =self.dummy_model()
        lowerCamelCase__ : List[str] =self.dummy_sample_deter
        lowerCamelCase__ : List[Any] =torch.manual_seed(0 )
        for i, t in enumerate(lowerCamelCase ):
            # 1. predict noise residual
            lowerCamelCase__ : List[str] =model(lowerCamelCase, lowerCamelCase )
            # 2. predict previous mean of sample x_t-1
            lowerCamelCase__ : Tuple =scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, generator=lowerCamelCase ).prev_sample
            lowerCamelCase__ : str =pred_prev_sample
        lowerCamelCase__ : List[str] =torch.sum(torch.abs(lowerCamelCase ) )
        lowerCamelCase__ : Union[str, Any] =torch.mean(torch.abs(lowerCamelCase ) )
        assert abs(result_sum.item() - 252.2_682_495 ) < 1E-2
        assert abs(result_mean.item() - 0.3_284_743 ) < 1E-3
    def snake_case ( self : List[str] )-> Optional[Any]:
        # Denoising loop with 25 inference steps and an explicit prev_timestep.
        lowerCamelCase__ : List[str] =self.scheduler_classes[0]
        lowerCamelCase__ : Optional[Any] =self.get_scheduler_config()
        lowerCamelCase__ : Dict =scheduler_class(**lowerCamelCase )
        scheduler.set_timesteps(25 )
        lowerCamelCase__ : Any =scheduler.timesteps
        lowerCamelCase__ : Optional[int] =self.dummy_model()
        lowerCamelCase__ : Dict =self.dummy_sample_deter
        lowerCamelCase__ : List[Any] =torch.manual_seed(0 )
        for i, t in enumerate(lowerCamelCase ):
            # 1. predict noise residual
            lowerCamelCase__ : List[Any] =model(lowerCamelCase, lowerCamelCase )
            if i + 1 == timesteps.shape[0]:
                lowerCamelCase__ : Tuple =None
            else:
                lowerCamelCase__ : str =timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            lowerCamelCase__ : Union[str, Any] =scheduler.step(
                lowerCamelCase, lowerCamelCase, lowerCamelCase, prev_timestep=lowerCamelCase, generator=lowerCamelCase ).prev_sample
            lowerCamelCase__ : List[str] =pred_prev_sample
        lowerCamelCase__ : List[str] =torch.sum(torch.abs(lowerCamelCase ) )
        lowerCamelCase__ : int =torch.mean(torch.abs(lowerCamelCase ) )
        assert abs(result_sum.item() - 258.2_044_983 ) < 1E-2
        assert abs(result_mean.item() - 0.3_362_038 ) < 1E-3
    def snake_case ( self : int )-> Optional[int]:
        pass
    def snake_case ( self : str )-> str:
        pass
| 625 |
"""simple docstring"""
def snake_case__ ( max_base : int = 10 , max_power : int = 22 ) -> int:
    """Project Euler 63: count n-digit positive integers that are also an n-th power.

    Counts pairs (base, power) with 1 <= base < max_base and
    1 <= power < max_power where ``base ** power`` has exactly ``power`` digits.

    NOTE(review): the original signature repeated ``__lowerCamelCase`` (a
    SyntaxError) and its body read unbound names ``bases``/``powers``; restored.
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    # NOTE(review): the original guard called undefined `solution`; call the def above.
    print(f'{snake_case__(10, 22) = }')
| 625 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
# Type variables for the generic linked-list / LRU cache classes below, which
# reference T and U (the obfuscation had rebound both to `_lowercase`).
T = TypeVar("T")
U = TypeVar("U")
class __SCREAMING_SNAKE_CASE ( Generic[T, U] ):
    """Node of a doubly linked list holding a key/value pair.

    NOTE(review): the original ``__init__`` bound locals instead of
    ``self.key``/``self.val``/``self.next``/``self.prev`` (which ``__repr__``
    reads); restored.
    """

    def __init__( self, key: T | None, val: U | None ) -> None:
        # Payload; prev/next links are wired up by the owning list.
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__( self ) -> str:
        return (
            f'''Node: key: {self.key}, val: {self.val}, '''
            f'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
        )
class __SCREAMING_SNAKE_CASE ( Generic[T, U] ):
    """Doubly linked list with sentinel head/rear nodes.

    Real entries live between the sentinels; the most-recently-added node sits
    just before ``rear``. NOTE(review): the original's pointer assignments had
    been reduced to local rebinds; restored, with method names ``add``/``remove``
    matching the call sites in the cache class below.
    """

    def __init__( self ) -> None:
        # Sentinel nodes carry no payload.
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__( self ) -> str:
        rep = ['''DoubleLinkedList''']
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)

    def add( self, node: DoubleLinkedListNode[T, U] ) -> None:
        """Insert ``node`` just before the rear sentinel."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove( self, node: DoubleLinkedListNode[T, U] ) -> DoubleLinkedListNode[T, U] | None:
        """Unlink and return ``node``; return None if it is not linked."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class __SCREAMING_SNAKE_CASE ( Generic[T, U] ):
    """LRU cache backed by a doubly linked list (oldest at head, newest at rear).

    NOTE(review): restored from an obfuscated form in which ``__init__`` bound
    locals instead of attributes, the three methods all shared the name
    ``snake_case`` (shadowing each other), and the class attribute was named
    ``_a`` — the decorator wrapper itself references ``get``/``put``/
    ``decorator_function_to_instance_map``, which fixes the intended names.
    """

    # Maps each decorated function to its dedicated cache instance (class-level).
    decorator_function_to_instance_map = {}

    def __init__( self, capacity: int ) -> None:
        self.list = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__( self ) -> str:
        return (
            f'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
            f'''capacity={self.capacity}, current size={self.num_keys})'''
        )

    def __contains__( self, key: T ) -> bool:
        return key in self.cache

    def get( self, key: T ) -> U | None:
        """Return the cached value for ``key`` (marking it most-recent), or None."""
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put( self, key: T, value: U ) -> None:
        """Insert/update ``key``; evicts the least-recently-used entry when full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator( cls, size: int = 128 ):
        """Decorator factory adding LRU caching (keyed on the first positional arg)."""
        def cache_decorator_inner(func):
            def cache_decorator_wrapper(*args):
                if func not in cls.decorator_function_to_instance_map:
                    # cls(size) rather than a hard-coded class name, so the
                    # decorator also works under this module's obfuscated name.
                    cls.decorator_function_to_instance_map[func] = cls(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info():
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, '''cache_info''', cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
# Run the module doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 625 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def snake_case__ ( x ):
    """Return ``x`` unchanged if it is iterable, else duplicate it into a 2-tuple.

    Used to normalise image-size / patch-size arguments that may be a single
    int or an (h, w) pair. NOTE(review): the original body read ``x`` while the
    parameter was named ``__lowerCamelCase``; restored.
    """
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def snake_case ( self : Dict, lowerCamelCase : List[str], lowerCamelCase : Any )-> Union[str, Any]:
pass
def snake_case ( self : List[str] )-> List[str]:
pass
def snake_case ( self : Optional[Any] )-> str:
pass
def snake_case ( self : Union[str, Any], lowerCamelCase : np.ndarray, lowerCamelCase : np.ndarray, lowerCamelCase : float )-> Dict:
lowerCamelCase__ : Union[str, Any] =np.abs((a - b) ).max()
self.assertLessEqual(lowerCamelCase, lowerCamelCase, F'''Difference between torch and flax is {diff} (>= {tol}).''' )
def snake_case ( self : Dict, lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : List[str], lowerCamelCase : Dict, lowerCamelCase : Any=None, **lowerCamelCase : str )-> int:
lowerCamelCase__ : List[str] =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : Dict =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], config.projection_dim) )
def snake_case ( self : Any, lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : str=None, **lowerCamelCase : List[Any] )-> int:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Union[str, Any] ={'''vision_model''': vision_model, '''text_model''': text_model}
lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], model.config.projection_dim) )
def snake_case ( self : Any, lowerCamelCase : Dict, lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict=None, **lowerCamelCase : int )-> List[str]:
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Optional[int] ={'''vision_model''': vision_model, '''text_model''': text_model}
lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
lowerCamelCase__ : List[Any] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
lowerCamelCase__ : int =output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase )
lowerCamelCase__ : Dict =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
lowerCamelCase__ : List[str] =after_output[0]
lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase, 1E-3 )
def snake_case ( self : Optional[Any], lowerCamelCase : Dict, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : List[Any]=None, **lowerCamelCase : List[Any] )-> Tuple:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Any ={'''vision_model''': vision_model, '''text_model''': text_model}
lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
lowerCamelCase__ : List[str] =model(
input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase, output_attentions=lowerCamelCase )
lowerCamelCase__ : int =output.vision_model_output.attentions
self.assertEqual(len(lowerCamelCase ), vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase__ : Tuple =to_atuple(vision_model.config.image_size )
lowerCamelCase__ : Optional[Any] =to_atuple(vision_model.config.patch_size )
lowerCamelCase__ : Union[str, Any] =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCamelCase__ : int =num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCamelCase__ : List[Any] =output.text_model_output.attentions
self.assertEqual(len(lowerCamelCase ), text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
def snake_case ( self : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : Any, lowerCamelCase : Union[str, Any] )-> Any:
pt_model.to(lowerCamelCase )
pt_model.eval()
# prepare inputs
lowerCamelCase__ : Any =inputs_dict
lowerCamelCase__ : Any ={k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
lowerCamelCase__ : List[str] =pt_model(**lowerCamelCase ).to_tuple()
lowerCamelCase__ : Optional[Any] =fx_model(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase )
lowerCamelCase__ : Optional[int] =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase )
lowerCamelCase__ : List[Any] =fx_model_loaded(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase )
lowerCamelCase__ : str =VisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase )
pt_model_loaded.to(lowerCamelCase )
pt_model_loaded.eval()
with torch.no_grad():
lowerCamelCase__ : List[Any] =pt_model_loaded(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCamelCase, pt_output_loaded.numpy(), 4E-2 )
def snake_case ( self : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[Any], lowerCamelCase : str )-> List[Any]:
lowerCamelCase__ : Any =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : List[Any] =VisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : str =convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase )
lowerCamelCase__ : Tuple =fx_state
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def snake_case ( self : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any] )-> Optional[int]:
lowerCamelCase__ : Dict =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Tuple =VisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : Tuple =load_flax_weights_in_pytorch_model(lowerCamelCase, fx_model.params )
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def snake_case ( self : Optional[int] )-> Union[str, Any]:
lowerCamelCase__ : Any =self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCamelCase )
def snake_case ( self : Tuple )-> int:
lowerCamelCase__ : int =self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase )
def snake_case ( self : Tuple )-> Any:
lowerCamelCase__ : Tuple =self.prepare_config_and_inputs()
self.check_save_load(**lowerCamelCase )
def snake_case ( self : str )-> Any:
lowerCamelCase__ : str =self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCamelCase )
@is_pt_flax_cross_test
def snake_case ( self : Tuple )-> List[Any]:
lowerCamelCase__ : Union[str, Any] =self.prepare_config_and_inputs()
lowerCamelCase__ : Union[str, Any] =config_inputs_dict.pop('''vision_config''' )
lowerCamelCase__ : Optional[Any] =config_inputs_dict.pop('''text_config''' )
lowerCamelCase__ : Tuple =config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCamelCase, lowerCamelCase, lowerCamelCase )
self.check_equivalence_flax_to_pt(lowerCamelCase, lowerCamelCase, lowerCamelCase )
@slow
def snake_case ( self : Optional[Any] )-> Tuple:
lowerCamelCase__ , lowerCamelCase__ : Dict =self.get_pretrained_model_and_inputs()
lowerCamelCase__ : Optional[int] =model_a(**lowerCamelCase )
lowerCamelCase__ : List[str] =outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCamelCase )
lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =model_a(**lowerCamelCase )
lowerCamelCase__ : List[Any] =after_outputs[0]
lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase, 1E-5 )
@require_flax
class __SCREAMING_SNAKE_CASE(lowerCAmelCase_, unittest.TestCase):
    """ViT + BERT instantiation of the dual-encoder test mixin (Flax).

    FIXES: all three helpers were named ``snake_case`` (later defs shadowed earlier
    ones) while the mixin calls ``get_pretrained_model_and_inputs``,
    ``get_vision_text_model`` and ``prepare_config_and_inputs`` — names restored.
    Duplicate parameter names and duplicate tuple-unpack targets also restored.
    """

    def get_pretrained_model_and_inputs(self):
        # Both tiny checkpoints are PyTorch, hence vision_from_pt/text_from_pt=True
        # (these flags were the undefined name `lowerCamelCase` in the corrupted source).
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        # Duplicate `lowerCamelCase` parameter names (SyntaxError) restored.
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class __SCREAMING_SNAKE_CASE(lowerCAmelCase_, unittest.TestCase):
    """CLIP-vision + BERT instantiation of the dual-encoder test mixin (Flax).

    FIXES: helper names restored to the ones the mixin actually calls
    (``get_pretrained_model_and_inputs`` / ``get_vision_text_model`` /
    ``prepare_config_and_inputs``); duplicate parameter names and duplicate
    tuple-unpack targets restored.
    """

    def get_pretrained_model_and_inputs(self):
        # Both tiny checkpoints are PyTorch, hence vision_from_pt/text_from_pt=True.
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        # Duplicate `lowerCamelCase` parameter names (SyntaxError) restored.
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class __SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Slow integration test against the real clip-italian checkpoint.

    FIXES: the processor's ``images``/``padding`` arguments were the undefined name
    ``lowerCamelCase`` (NameError) — restored to the opened image and True; locals
    renamed consistently; ``test_`` prefix added so unittest discovery runs it.
    """

    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
| 625 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.