from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
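# Usage sketch (not part of the original file; assumes the standard transformers
# lazy-import behavior): importing the module only registers `_import_structure`;
# the first attribute access triggers the real submodule import, e.g.
#
#   from transformers import TapasConfig, TapasForQuestionAnswering  # torch branch
#   config = TapasConfig()  # this line imports configuration_tapas lazily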
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from transformers import OneFormerImageProcessor
    from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
    from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput

if is_vision_available():
    from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []

    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))

    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected (height, width) after resizing, assuming do_resize is
        True and size is a shortest-edge dict.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs
    def test_init_without_params(self):
        pass
    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensures padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
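    # Illustrative note (not part of the original test; the mask slices above are
    # reconstructed to satisfy the assertions): binary_mask_to_rle flattens the mask
    # row by row and returns alternating (1-based start, run length) pairs for the
    # runs of ones. Rows 0 and 1 form one contiguous run: 30 pixels from column 20 of
    # row 0 plus 15 pixels at the start of row 1, hence start 21 and length 45; the
    # second pair covers the 10 pixels in row 5, so the list has 4 entries in total.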
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
    },
    "merges_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}


class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
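# Usage sketch (not part of the original file; assumes network access to the Hub):
#
#   from transformers import GPT2TokenizerFast
#   tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
#   tokenizer("Hello world")["input_ids"]   # expected: [15496, 995]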
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
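# Usage sketch (not part of the original file; class name as reconstructed above):
# instantiating the config with no arguments yields the defaults listed in __init__:
#
#   configuration = Data2VecVisionConfig()
#   (configuration.num_channels, configuration.image_size, configuration.patch_size)
#   # -> (3, 224, 16)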
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = """\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
"""

_DESCRIPTION = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""

_KWARGS_DESCRIPTION = """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:

    >>> rouge = datasets.load_metric('rouge')
    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> results = rouge.compute(predictions=predictions, references=references)
    >>> print(list(results.keys()))
    ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
    >>> print(results["rouge1"])
    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
    >>> print(results["rouge1"].mid.fmeasure)
    1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
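# Usage sketch (not part of the original file): disabling the aggregator returns one
# rouge_score Score tuple per prediction instead of a bootstrapped AggregateScore:
#
#   results = rouge.compute(predictions=preds, references=refs, use_aggregator=False)
#   results["rougeL"]   # -> list of Score(precision, recall, fmeasure), one per pair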
from typing import List, Optional, Union

import numpy as np

from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging


logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000

        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        """Extracts MFSC features for a single (unbatched) waveform."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
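# Usage sketch (not part of the original file; the class name is reconstructed above
# and the input is illustrative random audio):
#
#   extractor = MCTCTFeatureExtractor()
#   waveform = np.random.randn(16_000).astype(np.float32)   # 1 second at 16 kHz
#   batch = extractor(waveform, sampling_rate=16_000, padding=True, return_tensors="np")
#   batch["input_features"].shape   # -> (1, num_frames, 80) mel-filterbank features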
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    """
    Counts the attendance records ("prize strings") of the given length that never
    contain three consecutive late days or a second absence (Project Euler 191).
    """
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime

    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
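# Sanity check (from the Project Euler 191 problem statement): of the 81 possible
# 4-day strings, exactly 43 qualify for a prize, so the memoized recursion above
# should satisfy
#
#   assert solution(4) == 43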
from typing import Optional

import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        # Generates text via beam search until eos_token_id or entry_length is reached.
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
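# Usage sketch (not part of the original file; class name reconstructed above and
# all tensor sizes and the eos id are illustrative):
#
#   decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768)
#   clip_features = torch.randn(2, 77, 768)          # batch of prefix embeddings
#   tokens, lengths = decoder.generate_captions(
#       clip_features, eos_token_id=50256, device="cpu"
#   )  # one beam-searched token sequence per input feature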
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}


class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
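# Usage sketch (not part of the original file; assumes network access to the Hub):
#
#   from transformers import BloomTokenizerFast
#   tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#   tokenizer("Hello world")["input_ids"]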
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
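# Migration sketch (not part of the original file): existing call sites keep working
# through this shim, but the drop-in replacement is the image processor with the same
# kwargs, e.g.
#
#   from transformers import YolosImageProcessor
#   image_processor = YolosImageProcessor(size={"shortest_edge": 800, "longest_edge": 1333})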
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
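# Extension sketch (not part of the original file), illustrating the comment above:
# a new transformers scheduler is wired in by extending this dict, e.g.
#
#   from transformers.optimization import get_constant_schedule_with_warmup
#   arg_to_scheduler["constant_w_warmup"] = get_constant_schedule_with_warmup
#
# after which `--lr_scheduler constant_w_warmup` becomes a valid CLI choice.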
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading

        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(__file__).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class __magic_name__ ( pl.Callback ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :str , snake_case :Optional[int] ):
'''simple docstring'''
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on the master worker with RAY; in newer pytorch-lightning versions, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class __magic_name__ ( pl.Callback ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Dict , snake_case :Optional[int] ):
'''simple docstring'''
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(snake_case )
class __magic_name__ ( pl.Callback ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :int , snake_case :int ):
'''simple docstring'''
A_ : Optional[int] = trainer.lr_schedulers[0]["scheduler"]
A_ : Union[str, Any] = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(snake_case )
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :pl.Trainer , snake_case :pl.LightningModule ):
'''simple docstring'''
rank_zero_info("***** Validation results *****" )
A_ : List[str] = trainer.callback_metrics
# Log results
for key in sorted(snake_case ):
if key not in ["log", "progress_bar"]:
rank_zero_info("{} = {}\n".format(snake_case , str(metrics[key] ) ) )
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :pl.Trainer , snake_case :pl.LightningModule ):
'''simple docstring'''
rank_zero_info("***** Test results *****" )
A_ : int = trainer.callback_metrics
# Log and save results to file
A_ : Tuple = os.path.join(pl_module.hparams.output_dir , "test_results.txt" )
with open(snake_case , "w" ) as writer:
for key in sorted(snake_case ):
if key not in ["log", "progress_bar"]:
rank_zero_info("{} = {}\n".format(snake_case , str(metrics[key] ) ) )
writer.write("{} = {}\n".format(snake_case , str(metrics[key] ) ) )
def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict ) -> None:
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
"--output_dir" , default=str(Path(_lowerCAmelCase ).parent / "test_run" / "model_checkpoints" ) , type=_lowerCAmelCase , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=_lowerCAmelCase , default="O2" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_tpu_cores" , dest="tpu_cores" , type=_lowerCAmelCase )
parser.add_argument("--max_grad_norm" , dest="gradient_clip_val" , default=1.0 , type=_lowerCAmelCase , help="Max gradient norm" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_predict" , action="store_true" , help="Whether to run predictions on the test set." )
parser.add_argument(
"--gradient_accumulation_steps" , dest="accumulate_grad_batches" , type=_lowerCAmelCase , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--seed" , type=_lowerCAmelCase , default=42 , help="random seed for initialization" )
parser.add_argument(
"--data_dir" , default=str(Path(_lowerCAmelCase ).parent / "test_run" / "dummy-train-data" ) , type=_lowerCAmelCase , help="The input data dir. Should contain the training files for the CoNLL-2003 NER task." , )
def __snake_case ( _lowerCAmelCase : BaseTransformer , _lowerCAmelCase : argparse.Namespace , _lowerCAmelCase : int=None , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Optional[int]=[] , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Union[str, Any]=None , **_lowerCAmelCase : int , ) -> Any:
pl.seed_everything(args.seed )
# init model
A_ : Union[str, Any] = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=_lowerCAmelCase )
# add custom checkpoints
if checkpoint_callback is None:
A_ : List[str] = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix="checkpoint" , monitor="val_loss" , mode="min" , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(_lowerCAmelCase )
if logging_callback is None:
A_ : Optional[int] = LoggingCallback()
A_ : Tuple = {}
if args.fp16:
A_ : Union[str, Any] = 16
if args.gpus > 1:
A_ : Dict = "auto"
A_ : Any = "ddp"
A_ : Dict = args.accumulate_grad_batches
A_ : List[Any] = None
A_ : Optional[int] = "auto"
A_ : Any = pl.Trainer.from_argparse_args(
_lowerCAmelCase , weights_summary=_lowerCAmelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=_lowerCAmelCase , val_check_interval=1 , num_sanity_val_steps=2 , **_lowerCAmelCase , )
if args.do_train:
trainer.fit(_lowerCAmelCase )
else:
print("RAG modeling tests with new set functions successfuly executed!" )
return trainer
| 300
|
from __future__ import annotations
def __snake_case ( _lowerCAmelCase : list[float] ) -> bool:
if len(_lowerCAmelCase ) < 2:
raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
if any(i <= 0 for i in nums ):
raise ValueError("All values must be greater than 0" )
A_ : List[str] = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
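# Worked examples for the validator above (illustrative values):
#   [6.0, 10.0, 5.0]  -> True,  since the longest side satisfies 10 < 6 + 5
#   [3.0, 7.0, 13.0]  -> False, since 13 >= 3 + 7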
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300
| 1
|
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
_lowerCAmelCase : Optional[int] = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __magic_name__ :
"""simple docstring"""
def __init__( self :int , snake_case :Dict , snake_case :Optional[int]=16 , snake_case :Tuple=13 , snake_case :Tuple=7 , snake_case :int=14 , snake_case :str=10 , snake_case :str=19 , snake_case :Optional[Any]=5 , snake_case :int=4 , snake_case :Any=True , snake_case :Dict=16 , snake_case :Union[str, Any]=2 , snake_case :List[Any]=4 , snake_case :Optional[Any]=4 , snake_case :Optional[Any]="gelu" , snake_case :int=0.1 , snake_case :Union[str, Any]=0.1 , snake_case :Union[str, Any]=[1, 2, 3, 4, 5] , snake_case :str=25 , snake_case :Tuple=5 , ):
'''simple docstring'''
A_ : Optional[int] = d_model
A_ : Tuple = parent
A_ : List[str] = batch_size
A_ : Dict = prediction_length
A_ : List[str] = context_length
A_ : List[str] = cardinality
A_ : Dict = num_time_features
A_ : Optional[int] = lags_sequence
A_ : List[str] = embedding_dimension
A_ : str = is_training
A_ : Tuple = hidden_size
A_ : Optional[int] = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : Union[str, Any] = intermediate_size
A_ : List[str] = hidden_act
A_ : List[str] = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : Tuple = context_length
A_ : Any = prediction_length + label_length
A_ : str = label_length
A_ : Union[str, Any] = moving_average
A_ : Union[str, Any] = autocorrelation_factor
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :str ):
'''simple docstring'''
A_ : str = config.context_length + max(config.lags_sequence )
A_ : Tuple = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
A_ : List[str] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
A_ : List[str] = floats_tensor([self.batch_size, _past_length] )
A_ : List[Any] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
A_ : Optional[int] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
A_ : Optional[Any] = floats_tensor([self.batch_size, config.prediction_length] )
A_ : Any = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : str = self.get_config()
A_ : int = self.prepare_autoformer_inputs_dict(snake_case )
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :Dict , snake_case :str ):
'''simple docstring'''
A_ : Tuple = AutoformerModel(config=snake_case ).to(snake_case ).eval()
A_ : Optional[Any] = model(**snake_case )
A_ : Optional[Any] = outputs.encoder_last_hidden_state
A_ : Tuple = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : int = model.get_encoder()
encoder.save_pretrained(snake_case )
A_ : Union[str, Any] = AutoformerEncoder.from_pretrained(snake_case ).to(snake_case )
A_ , A_ , A_ , A_ , A_ : str = model.create_network_inputs(**snake_case )
A_ , A_ : Optional[int] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
A_ : List[str] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
A_ : int = encoder(inputs_embeds=snake_case )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
A_ : Any = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
A_ : Union[str, Any] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
A_ : List[str] = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
A_ : int = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : List[str] = model.get_decoder()
decoder.save_pretrained(snake_case )
A_ : List[str] = AutoformerDecoder.from_pretrained(snake_case ).to(snake_case )
A_ : List[str] = decoder(
trend=snake_case , inputs_embeds=snake_case , encoder_hidden_states=snake_case , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__UpperCamelCase = (AutoformerForPrediction,) if is_torch_available() else ()
__UpperCamelCase = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : List[str] = AutoformerModelTester(self )
A_ : Union[str, Any] = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ , A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
A_ : List[Any] = model_class(snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case )
A_ , A_ : Any = model_class.from_pretrained(snake_case , output_loading_info=snake_case )
self.assertEqual(info["missing_keys"] , [] )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case )
@unittest.skip(reason="Model has no tokens embeddings" )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Optional[int] = inspect.signature(getattr(snake_case , "forward" ) )
# The main input is the name of the argument after `self`
A_ : Dict = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , snake_case )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Union[str, Any] = model_class(snake_case )
A_ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : List[Any] = [*signature.parameters.keys()]
A_ : Optional[int] = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(snake_case )] , snake_case )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Dict = True
A_ : Any = getattr(self.model_tester , "seq_length" , snake_case )
A_ : List[str] = getattr(self.model_tester , "decoder_seq_length" , snake_case )
A_ : str = getattr(self.model_tester , "encoder_seq_length" , snake_case )
A_ : List[Any] = getattr(self.model_tester , "d_model" , snake_case )
A_ : str = getattr(self.model_tester , "num_attention_heads" , snake_case )
A_ : Dict = d_model // num_attention_heads
for model_class in self.all_model_classes:
A_ : Optional[int] = True
A_ : List[Any] = False
A_ : Union[str, Any] = True
A_ : Optional[int] = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
A_ : Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
A_ : str = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A_ : str = True
A_ : Dict = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
A_ : Optional[Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
A_ : int = outputs.encoder_attentions
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
A_ : Any = len(snake_case )
A_ : Dict = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(snake_case , snake_case )
# decoder attentions
A_ : Dict = outputs.decoder_attentions
self.assertIsInstance(snake_case , (list, tuple) )
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
A_ : Optional[Any] = outputs.cross_attentions
self.assertIsInstance(snake_case , (list, tuple) )
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
A_ : Tuple = True
A_ : Union[str, Any] = True
A_ : str = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
A_ : Optional[int] = model(**self._prepare_for_class(snake_case , snake_case ) )
self.assertEqual(out_len + 2 , len(snake_case ) )
A_ : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def __snake_case ( _lowerCAmelCase : Any="train-batch.pt" ) -> List[str]:
A_ : Optional[int] = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=_lowerCAmelCase , repo_type="dataset" )
A_ : Optional[int] = torch.load(_lowerCAmelCase , map_location=torch_device )
return batch
@require_torch
@slow
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Any = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case )
A_ : str = prepare_batch()
with torch.no_grad():
A_ : Dict = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
A_ : Dict = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , snake_case )
A_ : int = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=snake_case )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case , atol=snake_case ) )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : Optional[int] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case )
A_ : Optional[Any] = prepare_batch("val-batch.pt" )
with torch.no_grad():
A_ : Optional[Any] = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
A_ : int = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , snake_case )
A_ : Union[str, Any] = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=snake_case )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case , atol=snake_case ) )
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
A_ : List[Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case )
A_ : Any = prepare_batch("val-batch.pt" )
with torch.no_grad():
A_ : int = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
A_ : List[str] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , snake_case )
A_ : Optional[int] = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=snake_case )
A_ : Dict = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case , rtol=1e-1 ) )
| 300
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self :Union[str, Any] , snake_case :AutoencoderKL , snake_case :CLIPTextModel , snake_case :CLIPTokenizer , snake_case :UNetaDConditionModel , snake_case :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , snake_case :StableDiffusionSafetyChecker , snake_case :CLIPImageProcessor , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , )
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A_ : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
self.enable_attention_slicing(snake_case )
@torch.no_grad()
def __call__( self :Any , snake_case :Union[str, List[str]] , snake_case :int = 512 , snake_case :int = 512 , snake_case :int = 50 , snake_case :float = 7.5 , snake_case :Optional[Union[str, List[str]]] = None , snake_case :Optional[int] = 1 , snake_case :float = 0.0 , snake_case :Optional[torch.Generator] = None , snake_case :Optional[torch.FloatTensor] = None , snake_case :Optional[str] = "pil" , snake_case :bool = True , snake_case :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case :int = 1 , snake_case :Optional[torch.FloatTensor] = None , **snake_case :Optional[Any] , ):
'''simple docstring'''
if isinstance(snake_case , snake_case ):
A_ : Dict = 1
elif isinstance(snake_case , snake_case ):
A_ : Optional[Any] = len(snake_case )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(snake_case )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case , snake_case ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(snake_case )}." )
# get prompt text embeddings
A_ : int = self.tokenizer(
snake_case , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
A_ : Dict = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A_ : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
A_ : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
A_ : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
A_ , A_ , A_ : int = text_embeddings.shape
A_ : List[str] = text_embeddings.repeat(1 , snake_case , 1 )
A_ : List[str] = text_embeddings.view(bs_embed * num_images_per_prompt , snake_case , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
A_ : Dict = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A_ : List[str]
if negative_prompt is None:
A_ : List[str] = [""]
elif type(snake_case ) is not type(snake_case ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(snake_case )} !="
f" {type(snake_case )}." )
elif isinstance(snake_case , snake_case ):
A_ : Optional[Any] = [negative_prompt]
elif batch_size != len(snake_case ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(snake_case )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
A_ : Any = negative_prompt
A_ : Optional[int] = text_input_ids.shape[-1]
A_ : Dict = self.tokenizer(
snake_case , padding="max_length" , max_length=snake_case , truncation=snake_case , return_tensors="pt" , )
A_ : Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A_ : Tuple = uncond_embeddings.shape[1]
A_ : Dict = uncond_embeddings.repeat(snake_case , snake_case , 1 )
A_ : Dict = uncond_embeddings.view(batch_size * num_images_per_prompt , snake_case , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A_ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
A_ : List[str] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
A_ : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
A_ : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A_ : Tuple = torch.randn(
snake_case , generator=snake_case , device="cpu" , dtype=snake_case ).to(self.device )
A_ : Optional[Any] = torch.randn(snake_case , generator=snake_case , device="cpu" , dtype=snake_case ).to(
self.device )
else:
A_ : int = torch.randn(
snake_case , generator=snake_case , device=self.device , dtype=snake_case )
A_ : Optional[int] = torch.randn(snake_case , generator=snake_case , device=self.device , dtype=snake_case )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
A_ : Tuple = latents_reference.to(self.device )
A_ : Any = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
A_ : List[Any] = (latents_shape[3] - latents_shape_reference[3]) // 2
A_ : Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2
A_ : Optional[int] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
A_ : Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
A_ : Optional[Any] = 0 if dx < 0 else dx
A_ : Optional[Any] = 0 if dy < 0 else dy
A_ : List[str] = max(-dx , 0 )
A_ : List[Any] = max(-dy , 0 )
A_ : Any = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(snake_case )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
A_ : str = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A_ : Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A_ : Optional[int] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A_ : List[str] = {}
if accepts_eta:
A_ : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(snake_case ) ):
# expand the latents if we are doing classifier free guidance
A_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A_ : Any = self.scheduler.scale_model_input(snake_case , snake_case )
# predict the noise residual
A_ : List[str] = self.unet(snake_case , snake_case , encoder_hidden_states=snake_case ).sample
# perform guidance
if do_classifier_free_guidance:
A_ , A_ : Dict = noise_pred.chunk(2 )
A_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A_ : Tuple = self.scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case , snake_case , snake_case )
A_ : List[str] = 1 / 0.18215 * latents
A_ : Tuple = self.vae.decode(snake_case ).sample
A_ : Dict = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
A_ : int = self.feature_extractor(self.numpy_to_pil(snake_case ) , return_tensors="pt" ).to(
self.device )
A_ , A_ : List[str] = self.safety_checker(
images=snake_case , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
A_ : List[str] = None
if output_type == "pil":
A_ : Optional[int] = self.numpy_to_pil(snake_case )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=snake_case , nsfw_content_detected=snake_case )
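# Minimal usage sketch for the seed-resize pipeline above (assumed identifiers;
# the model id, sizes and keyword names are assumptions, and `saved_latents`
# would normally be captured from an earlier run at a reference resolution):
#
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5",
#                                            custom_pipeline="seed_resize_stable_diffusion")
#   out = pipe("a photo of an astronaut", height=768, width=768,
#              latents_reference=saved_latents,
#              generator=torch.Generator().manual_seed(0))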
| 300
| 1
|
import datasets
_lowerCAmelCase : str = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
_lowerCAmelCase : str = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some of them low-resource). As with MNLI, the goal
is to predict textual entailment (does sentence A imply/contradict/neither
sentence B); it is a classification task (given two sentences, predict one of
three labels).
'''
_lowerCAmelCase : Optional[int] = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict ) -> int:
return (preds == labels).mean()
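# Worked example for the accuracy helper above (illustrative arrays):
#   preds = np.array([0, 1, 1]); labels = np.array([0, 1, 0])
#   (preds == labels).mean() -> 2/3 ≈ 0.667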
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
def SCREAMING_SNAKE_CASE ( self :str , snake_case :List[Any] , snake_case :List[str] ):
'''simple docstring'''
return {"accuracy": simple_accuracy(snake_case , snake_case )}
| 300
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Any ) -> Dict:
A_ : Optional[Any] = nn.functional.normalize(_lowerCAmelCase )
A_ : List[str] = nn.functional.normalize(_lowerCAmelCase )
return torch.mm(_lowerCAmelCase , normalized_text_embeds.t() )
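# Shape sketch for the helper above (hypothetical sizes): with image embeddings
# of shape (N, D) and text embeddings of shape (M, D), both inputs are
# L2-normalized and the matrix product yields an (N, M) grid of cosine
# similarities, e.g.:
#   cosine_distance(torch.randn(4, 512), torch.randn(17, 512)).shape -> (4, 17)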
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = CLIPConfig
__UpperCamelCase = ['''CLIPEncoderLayer''']
def __init__( self :int , snake_case :CLIPConfig ):
'''simple docstring'''
super().__init__(snake_case )
A_ : int = CLIPVisionModel(config.vision_config )
A_ : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=snake_case )
A_ : Tuple = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=snake_case )
A_ : str = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=snake_case )
A_ : List[str] = nn.Parameter(torch.ones(17 ) , requires_grad=snake_case )
A_ : int = nn.Parameter(torch.ones(3 ) , requires_grad=snake_case )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Dict , snake_case :Any ):
'''simple docstring'''
A_ : List[Any] = self.vision_model(snake_case )[1] # pooled_output
A_ : List[Any] = self.visual_projection(snake_case )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : Optional[Any] = cosine_distance(snake_case , self.special_care_embeds ).cpu().float().numpy()
A_ : Tuple = cosine_distance(snake_case , self.concept_embeds ).cpu().float().numpy()
A_ : Union[str, Any] = []
A_ : Any = image_embeds.shape[0]
for i in range(snake_case ):
A_ : Optional[int] = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A_ : Optional[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
A_ : Optional[Any] = special_cos_dist[i][concept_idx]
A_ : Tuple = self.special_care_embeds_weights[concept_idx].item()
A_ : Union[str, Any] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]} )
A_ : Any = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
A_ : Tuple = cos_dist[i][concept_idx]
A_ : Tuple = self.concept_embeds_weights[concept_idx].item()
A_ : Tuple = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(snake_case )
result.append(snake_case )
A_ : Any = [len(res["bad_concepts"] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor ):
'''simple docstring'''
A_ : List[str] = self.vision_model(snake_case )[1] # pooled_output
A_ : int = self.visual_projection(snake_case )
A_ : Tuple = cosine_distance(snake_case , self.special_care_embeds )
A_ : Tuple = cosine_distance(snake_case , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A_ : Optional[Any] = 0.0
A_ : Tuple = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
A_ : Optional[Any] = torch.any(special_scores > 0 , dim=1 )
A_ : Optional[Any] = special_care * 0.01
A_ : Optional[int] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
A_ : Union[str, Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
A_ : Union[str, Any] = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 300
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = '''openai/whisper-base'''
__UpperCamelCase = (
'''This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '''
'''transcribed text.'''
)
__UpperCamelCase = '''transcriber'''
__UpperCamelCase = WhisperProcessor
__UpperCamelCase = WhisperForConditionalGeneration
__UpperCamelCase = ['''audio''']
__UpperCamelCase = ['''text''']
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Union[str, Any] ):
'''simple docstring'''
return self.pre_processor(snake_case , return_tensors="pt" ).input_features
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Any ):
'''simple docstring'''
return self.model.generate(inputs=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :Tuple ):
'''simple docstring'''
return self.pre_processor.batch_decode(snake_case , skip_special_tokens=snake_case )[0]
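# Minimal usage sketch for the transcription tool above (assumed wiring;
# `SpeechToTextTool` is an assumed name for this class, and `audio` stands for
# a waveform or file path accepted by the Whisper processor):
#
#   tool = SpeechToTextTool()
#   text = tool(audio)  # encode -> forward (generate) -> decode, as defined above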
| 300
|
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
def __snake_case ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
A_ : Tuple = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> Dict:
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
A_ : str = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight" )
A_ : List[Any] = in_proj_weight[
: encoder_config.hidden_size, :
]
A_ : Optional[Any] = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
A_ : Optional[Any] = in_proj_weight[
-encoder_config.hidden_size :, :
]
def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict ) -> Any:
A_ : Dict = dct.pop(_lowerCAmelCase )
A_ : List[Any] = val
def __snake_case ( _lowerCAmelCase : List[str] ) -> int:
if "handwritten" in checkpoint_url:
A_ : Any = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ : Any = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
A_ : List[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ).convert("RGB" )
return im
@torch.no_grad()
def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> List[Any]:
A_ : Optional[Any] = ViTConfig(image_size=384 , qkv_bias=_lowerCAmelCase )
A_ : Tuple = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
A_ : Tuple = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
A_ : Optional[Any] = 1024
A_ : Union[str, Any] = 4096
A_ : Union[str, Any] = 24
A_ : List[Any] = 16
A_ : List[str] = 1024
else:
raise ValueError("Should either find 'base' or 'large' in checkpoint URL" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ : Dict = False
A_ : int = "relu"
A_ : Optional[int] = 1024
A_ : Any = True
A_ : List[Any] = False
A_ : Optional[int] = False
# load HuggingFace model
A_ : Union[str, Any] = ViTModel(_lowerCAmelCase , add_pooling_layer=_lowerCAmelCase )
A_ : str = TrOCRForCausalLM(_lowerCAmelCase )
A_ : List[str] = VisionEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
model.eval()
# load state_dict of original model, rename some keys
A_ : Optional[int] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="cpu" , check_hash=_lowerCAmelCase )["model"]
A_ : Dict = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
A_ : Dict = state_dict.pop(_lowerCAmelCase )
if key.startswith("decoder" ) and "output_projection" not in key:
A_ : List[str] = val
else:
A_ : Optional[Any] = val
# load state dict
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image
A_ : List[Any] = ViTImageProcessor(size=encoder_config.image_size )
A_ : Any = RobertaTokenizer.from_pretrained("roberta-large" )
A_ : Union[str, Any] = TrOCRProcessor(_lowerCAmelCase , _lowerCAmelCase )
A_ : List[str] = processor(images=prepare_img(_lowerCAmelCase ) , return_tensors="pt" ).pixel_values
# verify logits
A_ : Union[str, Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
A_ : Optional[int] = model(pixel_values=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase )
A_ : Tuple = outputs.logits
A_ : Union[str, Any] = torch.Size([1, 1, 50265] )
if "trocr-base-handwritten" in checkpoint_url:
A_ : Union[str, Any] = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
A_ : str = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
A_ : Optional[Any] = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
A_ : Optional[int] = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , _lowerCAmelCase , atol=1e-3 ), "First elements of logits not as expected"
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCAmelCase )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_lowerCAmelCase : List[str] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
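# Example invocation of the conversion script above (the script filename and
# output path are illustrative; the URL is the default defined above):
#
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten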
| 300
| 1
|
def __snake_case ( _lowerCAmelCase : float , _lowerCAmelCase : int ) -> float:
if digit_amount > 0:
return round(number - int(_lowerCAmelCase ) , _lowerCAmelCase )
return number - int(_lowerCAmelCase )
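# Worked examples (approximate, since the arithmetic happens in binary floating
# point): decimal_isolate(35.345, 1) -> 0.3 and decimal_isolate(35.345, 3)
# -> 0.345; with digit_amount <= 0 the raw fractional part is returned, e.g.
# decimal_isolate(1.53, 0) -> 0.53.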
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 300
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = 42
__UpperCamelCase = 42
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = 1
@register_to_config
def __init__( self :Union[str, Any] , snake_case :int = 2_000 , snake_case :float = 0.15 , snake_case :float = 0.01 , snake_case :float = 1348.0 , snake_case :float = 1e-5 , snake_case :int = 1 , ):
'''simple docstring'''
A_ : Dict = sigma_max
# setable values
A_ : List[Any] = None
self.set_sigmas(snake_case , snake_case , snake_case , snake_case )
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :torch.FloatTensor , snake_case :Optional[int] = None ):
'''simple docstring'''
return sample
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :int , snake_case :float = None , snake_case :Union[str, torch.device] = None ):
'''simple docstring'''
A_ : Optional[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
A_ : Tuple = torch.linspace(1 , snake_case , snake_case , device=snake_case )
def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :int , snake_case :float = None , snake_case :float = None , snake_case :float = None ):
'''simple docstring'''
A_ : Union[str, Any] = sigma_min if sigma_min is not None else self.config.sigma_min
A_ : Any = sigma_max if sigma_max is not None else self.config.sigma_max
A_ : Dict = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(snake_case , snake_case )
A_ : str = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
A_ : Any = torch.exp(torch.linspace(math.log(snake_case ) , math.log(snake_case ) , snake_case ) )
A_ : str = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :List[str] , snake_case :Dict ):
'''simple docstring'''
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :torch.FloatTensor , snake_case :int , snake_case :torch.FloatTensor , snake_case :Optional[torch.Generator] = None , snake_case :bool = True , ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
A_ : int = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
A_ : Optional[Any] = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
A_ : Dict = timesteps.to(self.discrete_sigmas.device )
A_ : Optional[int] = self.discrete_sigmas[timesteps].to(sample.device )
A_ : int = self.get_adjacent_sigma(snake_case , snake_case ).to(sample.device )
A_ : Union[str, Any] = torch.zeros_like(snake_case )
A_ : Tuple = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
A_ : Optional[int] = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
A_ : Tuple = diffusion.unsqueeze(-1 )
A_ : Optional[Any] = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
A_ : List[Any] = randn_tensor(
sample.shape , layout=sample.layout , generator=snake_case , device=sample.device , dtype=sample.dtype )
A_ : List[Any] = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
A_ : Any = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=snake_case , prev_sample_mean=snake_case )
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , snake_case :Optional[torch.Generator] = None , snake_case :bool = True , ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
# For small batch sizes, the paper suggests replacing norm(z) with sqrt(d), where d is the dim. of z
# sample noise for correction
A_ : Dict = randn_tensor(sample.shape , layout=sample.layout , generator=snake_case ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
A_ : int = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
A_ : List[Any] = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
A_ : Dict = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
A_ : Dict = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
A_ : int = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
A_ : str = step_size.unsqueeze(-1 )
A_ : Optional[Any] = sample + step_size * model_output
A_ : Tuple = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case )
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , ):
'''simple docstring'''
A_ : Union[str, Any] = timesteps.to(original_samples.device )
A_ : List[Any] = self.discrete_sigmas.to(original_samples.device )[timesteps]
A_ : List[Any] = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(snake_case ) * sigmas[:, None, None, None]
)
A_ : Optional[int] = noise + original_samples
return noisy_samples
def __len__( self :Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
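# Sketch of the predictor-corrector sampling loop this scheduler is meant to
# drive (assumed usage, mirroring the score-SDE recipe; `model` and the sample
# shape are hypothetical, and `step_pred`/`step_correct` are assumed names for
# the two step methods defined above):
#
#   scheduler.set_timesteps(num_inference_steps)
#   scheduler.set_sigmas(num_inference_steps)
#   for t in scheduler.timesteps:
#       for _ in range(scheduler.config.correct_steps):
#           score = model(sample, t).sample
#           sample = scheduler.step_correct(score, sample).prev_sample
#       out = scheduler.step_pred(model(sample, t).sample, t, sample)
#       sample = out.prev_sample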
| 300
| 1
|
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_lowerCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
def __snake_case ( _lowerCAmelCase : Union[List, PIL.Image.Image, torch.Tensor] ) -> Optional[Any]:
warnings.warn(
"The preprocess method is deprecated and will be removed in a future version. Please"
" use VaeImageProcessor.preprocess instead" , _lowerCAmelCase , )
if isinstance(_lowerCAmelCase , torch.Tensor ):
return image
elif isinstance(_lowerCAmelCase , PIL.Image.Image ):
A_ : str = [image]
if isinstance(image[0] , PIL.Image.Image ):
A_ , A_ : Union[str, Any] = image[0].size
A_ , A_ : Optional[int] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
A_ : Union[str, Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
A_ : int = np.concatenate(_lowerCAmelCase , axis=0 )
A_ : Any = np.array(_lowerCAmelCase ).astype(np.floataa ) / 2_55.0
A_ : int = image.transpose(0 , 3 , 1 , 2 )
A_ : Union[str, Any] = 2.0 * image - 1.0
A_ : Optional[Any] = torch.from_numpy(_lowerCAmelCase )
elif isinstance(image[0] , torch.Tensor ):
A_ : List[Any] = torch.cat(_lowerCAmelCase , dim=0 )
return image
def __snake_case ( _lowerCAmelCase : Union[List, PIL.Image.Image, torch.Tensor] ) -> Any:
if isinstance(_lowerCAmelCase , torch.Tensor ):
return mask
elif isinstance(_lowerCAmelCase , PIL.Image.Image ):
A_ : Optional[Any] = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
A_ , A_ : str = mask[0].size
A_ , A_ : int = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
A_ : Optional[Any] = [np.array(m.convert("L" ).resize((w, h) , resample=PIL_INTERPOLATION["nearest"] ) )[None, :] for m in mask]
A_ : List[str] = np.concatenate(_lowerCAmelCase , axis=0 )
A_ : List[Any] = mask.astype(np.floataa ) / 2_55.0
A_ : List[str] = 0
A_ : str = 1
A_ : int = torch.from_numpy(_lowerCAmelCase )
elif isinstance(mask[0] , torch.Tensor ):
A_ : Tuple = torch.cat(_lowerCAmelCase , dim=0 )
return mask
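# Note on the mask helper above: after scaling by 1/255, the two bare
# assignments presumably stand in for binarizing the mask to {0, 1} around a
# 0.5 threshold (an assumption based on the reference RePaint preprocessing);
# masks therefore end up as float tensors with a leading batch dimension.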
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = 42
__UpperCamelCase = 42
def __init__( self :Optional[Any] , snake_case :Union[str, Any] , snake_case :List[Any] ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=snake_case , scheduler=snake_case )
@torch.no_grad()
def __call__( self :str , snake_case :Union[torch.Tensor, PIL.Image.Image] , snake_case :Union[torch.Tensor, PIL.Image.Image] , snake_case :int = 250 , snake_case :float = 0.0 , snake_case :int = 10 , snake_case :int = 10 , snake_case :Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case :Optional[str] = "pil" , snake_case :bool = True , ):
'''simple docstring'''
A_ : int = image
A_ : List[Any] = _preprocess_image(snake_case )
A_ : List[str] = original_image.to(device=self.device , dtype=self.unet.dtype )
A_ : str = _preprocess_mask(snake_case )
A_ : Any = mask_image.to(device=self.device , dtype=self.unet.dtype )
A_ : Any = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(snake_case , snake_case ) and len(snake_case ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(snake_case )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
A_ : Optional[int] = original_image.shape
A_ : Dict = randn_tensor(snake_case , generator=snake_case , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(snake_case , snake_case , snake_case , self.device )
A_ : int = eta
A_ : Tuple = self.scheduler.timesteps[0] + 1
A_ : str = generator[0] if isinstance(snake_case , snake_case ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
A_ : List[Any] = self.unet(snake_case , snake_case ).sample
# compute previous image: x_t -> x_t-1
A_ : Any = self.scheduler.step(snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
A_ : Union[str, Any] = self.scheduler.undo_step(snake_case , snake_case , snake_case )
A_ : List[str] = t
A_ : Any = (image / 2 + 0.5).clamp(0 , 1 )
A_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A_ : Optional[int] = self.numpy_to_pil(snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case )
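# A hedged usage sketch (added): the class above is diffusers' RePaintPipeline,
# so inpainting could look roughly like the following. The model id and file
# names are illustrative assumptions; in the mask, white (1.0) marks pixels to
# keep and black (0.0) marks the region to inpaint.
import torch
import PIL.Image
from diffusers import RePaintPipeline, RePaintScheduler

repaint_scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
repaint_pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=repaint_scheduler)
original_image = PIL.Image.open("face.png").convert("RGB")
mask_image = PIL.Image.open("mask.png").convert("RGB")
result = repaint_pipe(
    image=original_image,
    mask_image=mask_image,
    num_inference_steps=250,
    jump_length=10,
    jump_n_sample=10,
    generator=torch.Generator().manual_seed(0),
)
result.images[0].save("inpainted.png")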
| 300
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson( func : str , a : float | Decimal , precision : float = 10**-10 ) -> float:
    x = a
    while True:
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) )  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision:  # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
# Find root of log(x) - 1 = 0 (x = e)
print(F'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
| 300
| 1
|
from math import isclose, sqrt
def next_point( point_x : float , point_y : float , incoming_gradient : float ) -> tuple[float, float, float]:
    # gradient of the normal at (point_x, point_y) on the ellipse 4x^2 + y^2 = 100
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus , point_x ) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
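# Derivation note (added): with the normal's gradient n = tan(phi) at the point
# of incidence, reflecting an incoming gradient m = tan(theta) yields
# tan(2*phi - theta); next_point computes this via the double-angle identities
#     sin(2*phi) = 2n / (1 + n^2)          (s2 above)
#     cos(2*phi) = (1 - n^2) / (1 + n^2)   (c2 above)
# so outgoing_gradient = (s2 - c2 * m) / (c2 + s2 * m).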
def solution( first_x_coord : float = 1.4 , first_y_coord : float = -9.6 ) -> int:
    num_reflections : int = 0
    point_x : float = first_x_coord
    point_y : float = first_y_coord
    incoming_gradient : float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x , point_y , incoming_gradient = next_point(point_x , point_y , incoming_gradient )
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(F'''{solution() = }''')
| 300
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_lowerCAmelCase : List[Any] = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_lowerCAmelCase : Union[str, Any] = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_lowerCAmelCase : Optional[Any] = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, removes punctuation from hypotheses and references before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Optional[int] , snake_case :List[Any] , snake_case :bool = False , snake_case :bool = False , snake_case :bool = False , snake_case :bool = False , ):
'''simple docstring'''
A_ : List[str] = len(references[0] )
if any(len(refs ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
A_ : int = [[refs[i] for refs in references] for i in range(references_per_prediction )]
A_ : Optional[Any] = TER(
normalized=snake_case , no_punct=snake_case , asian_support=snake_case , case_sensitive=snake_case , )
A_ : List[Any] = sb_ter.corpus_score(snake_case , snake_case )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 300
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
_lowerCAmelCase : Dict = None
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Any = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCAmelCase : Optional[int] = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
_lowerCAmelCase : Any = {
'''google/rembert''': 256,
}
_lowerCAmelCase : List[str] = '''▁'''
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = RemBertTokenizer
def __init__( self :Optional[int] , snake_case :Any=None , snake_case :Union[str, Any]=None , snake_case :Any=True , snake_case :Optional[int]=True , snake_case :Optional[Any]=False , snake_case :Any="[CLS]" , snake_case :Union[str, Any]="[SEP]" , snake_case :Optional[int]="<unk>" , snake_case :List[Any]="[SEP]" , snake_case :Tuple="<pad>" , snake_case :Optional[int]="[CLS]" , snake_case :Any="[MASK]" , **snake_case :str , ):
'''simple docstring'''
A_ : Optional[Any] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token
super().__init__(
snake_case , tokenizer_file=snake_case , do_lower_case=snake_case , remove_space=snake_case , keep_accents=snake_case , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , **snake_case , )
A_ : List[Any] = do_lower_case
A_ : int = remove_space
A_ : Tuple = keep_accents
A_ : List[str] = vocab_file
A_ : Union[str, Any] = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :List[int] , snake_case :Optional[List[int]] = None ):
'''simple docstring'''
A_ : Any = [self.sep_token_id]
A_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :List[int] , snake_case :Optional[List[int]] = None , snake_case :bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1]
return [1] + ([0] * len(snake_case )) + [1]
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :List[int] , snake_case :Optional[List[int]] = None ):
'''simple docstring'''
A_ : str = [self.sep_token_id]
A_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE ( self :str , snake_case :str , snake_case :Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(snake_case ):
logger.error("Vocabulary path ({}) should be a directory".format(snake_case ) )
return
A_ : Tuple = os.path.join(
snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ):
copyfile(self.vocab_file , snake_case )
return (out_vocab_file,)
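# A minimal usage sketch (added), assuming this is transformers'
# RemBertTokenizerFast: single sequences become "[CLS] A [SEP]" and pairs
# become "[CLS] A [SEP] B [SEP]", matching build_inputs_with_special_tokens above.
from transformers import RemBertTokenizerFast

rembert_tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
pair_encoding = rembert_tokenizer("Hello world", "How are you?")
print(rembert_tokenizer.convert_ids_to_tokens(pair_encoding["input_ids"]))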
| 300
|
def euclidean_distance_sqr( point_a , point_b ) -> float:
    return (point_a[0] - point_b[0]) ** 2 + (point_a[1] - point_b[1]) ** 2


def column_based_sort( array , column=0 ) -> list:
    return sorted(array , key=lambda x : x[column] )


def dis_between_closest_pair( points , points_counts , min_dis=float("inf" ) ) -> float:
    for i in range(points_counts - 1 ):
        for j in range(i + 1 , points_counts ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip( points , points_counts , min_dis=float("inf" ) ) -> float:
    for i in range(min(6 , points_counts - 1 ) , points_counts ):
        for j in range(max(0 , i - 6 ) , i ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr( points_sorted_on_x , points_sorted_on_y , points_counts ) -> float:
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x , points_counts )
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[:mid] , mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y , points_sorted_on_y[mid:] , points_counts - mid )
    closest_pair_dis = min(closest_in_left , closest_in_right )
    # cross_strip holds the points whose x-coordinates lie within
    # closest_pair_dis of the dividing line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip , len(cross_strip ) , closest_pair_dis )
    return min(closest_pair_dis , closest_in_strip )


def closest_pair_of_points( points , points_counts ) -> float:
    points_sorted_on_x = column_based_sort(points , column=0 )
    points_sorted_on_y = column_based_sort(points , column=1 )
    return (
        closest_pair_of_points_sqr(points_sorted_on_x , points_sorted_on_y , points_counts )
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:" , closest_pair_of_points(points , len(points ) ) )
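# Sanity-check sketch (added): a brute-force scan over all pairs agrees with
# the divide-and-conquer result on a small sample.
from itertools import combinations

sample_points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
brute_force = min(
    euclidean_distance_sqr(p, q) for p, q in combinations(sample_points, 2)
) ** 0.5
assert abs(brute_force - closest_pair_of_points(sample_points, len(sample_points))) < 1e-9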
| 300
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
_lowerCAmelCase : Tuple = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCAmelCase : Any = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
_lowerCAmelCase : Optional[int] = {
'''google/electra-small-generator''': 512,
'''google/electra-base-generator''': 512,
'''google/electra-large-generator''': 512,
'''google/electra-small-discriminator''': 512,
'''google/electra-base-discriminator''': 512,
'''google/electra-large-discriminator''': 512,
}
_lowerCAmelCase : str = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ElectraTokenizer
def __init__( self :Dict , snake_case :Tuple=None , snake_case :Optional[int]=None , snake_case :str=True , snake_case :int="[UNK]" , snake_case :Union[str, Any]="[SEP]" , snake_case :Tuple="[PAD]" , snake_case :Optional[int]="[CLS]" , snake_case :Tuple="[MASK]" , snake_case :Optional[Any]=True , snake_case :Optional[int]=None , **snake_case :Dict , ):
'''simple docstring'''
super().__init__(
snake_case , tokenizer_file=snake_case , do_lower_case=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , tokenize_chinese_chars=snake_case , strip_accents=snake_case , **snake_case , )
A_ : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , snake_case ) != do_lower_case
or normalizer_state.get("strip_accents" , snake_case ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , snake_case ) != tokenize_chinese_chars
):
A_ : int = getattr(snake_case , normalizer_state.pop("type" ) )
A_ : Any = do_lower_case
A_ : List[Any] = strip_accents
A_ : Dict = tokenize_chinese_chars
A_ : str = normalizer_class(**snake_case )
A_ : Dict = do_lower_case
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :Dict , snake_case :str=None ):
'''simple docstring'''
A_ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :List[int] , snake_case :Optional[List[int]] = None ):
'''simple docstring'''
A_ : Optional[int] = [self.sep_token_id]
A_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :str , snake_case :Optional[str] = None ):
'''simple docstring'''
A_ : List[str] = self._tokenizer.model.save(snake_case , name=snake_case )
return tuple(snake_case )
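# A hedged usage sketch (added), assuming this is transformers'
# ElectraTokenizerFast: the normalizer-state check in __init__ means a
# do_lower_case mismatch rebuilds the backend normalizer, so cased input is
# lowercased before WordPiece tokenization.
from transformers import ElectraTokenizerFast

electra_tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
print(electra_tokenizer.tokenize("HELLO World"))  # ['hello', 'world']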
| 300
|
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __magic_name__ :
"""simple docstring"""
def __init__( self :Dict , snake_case :Optional[int] , snake_case :Tuple=13 , snake_case :List[Any]=30 , snake_case :Union[str, Any]=2 , snake_case :List[Any]=3 , snake_case :Tuple=True , snake_case :Dict=True , snake_case :Dict=32 , snake_case :List[str]=5 , snake_case :Optional[Any]=4 , snake_case :Any=37 , snake_case :Dict="gelu" , snake_case :List[str]=0.1 , snake_case :str=0.1 , snake_case :Tuple=10 , snake_case :str=0.02 , snake_case :Optional[Any]=None , ):
'''simple docstring'''
A_ : Tuple = parent
A_ : int = batch_size
A_ : List[str] = image_size
A_ : List[Any] = patch_size
A_ : Optional[Any] = num_channels
A_ : List[Any] = is_training
A_ : Tuple = use_labels
A_ : Union[str, Any] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Any = num_attention_heads
A_ : List[str] = intermediate_size
A_ : Optional[int] = hidden_act
A_ : List[str] = hidden_dropout_prob
A_ : str = attention_probs_dropout_prob
A_ : Any = type_sequence_label_size
A_ : List[str] = initializer_range
A_ : Dict = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A_ : Optional[int] = (image_size // patch_size) ** 2
A_ : List[str] = num_patches + 1
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Tuple = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Dict = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :List[Any] , snake_case :str , snake_case :Tuple ):
'''simple docstring'''
A_ : Optional[Any] = ViTMSNModel(config=snake_case )
model.to(snake_case )
model.eval()
A_ : int = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self :int , snake_case :Optional[int] , snake_case :List[str] , snake_case :List[str] ):
'''simple docstring'''
A_ : Dict = self.type_sequence_label_size
A_ : Tuple = ViTMSNForImageClassification(snake_case )
model.to(snake_case )
model.eval()
A_ : Union[str, Any] = model(snake_case , labels=snake_case )
print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" )
print("Labels: {labels}" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ : Union[str, Any] = 1
A_ : int = ViTMSNForImageClassification(snake_case )
model.to(snake_case )
model.eval()
A_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : Optional[Any] = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : List[str] = self.prepare_config_and_inputs()
A_ , A_ , A_ : Optional[int] = config_and_inputs
A_ : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__UpperCamelCase = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = ViTMSNModelTester(self )
A_ : str = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ , A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[int] = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ , A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(snake_case )
A_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : List[str] = [*signature.parameters.keys()]
A_ : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
@slow
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Optional[Any] = ViTMSNModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def prepare_img( ):
A_ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
torch.manual_seed(2 )
A_ : Any = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(snake_case )
A_ : List[str] = self.default_image_processor
A_ : int = prepare_img()
A_ : List[str] = image_processor(images=snake_case , return_tensors="pt" ).to(snake_case )
# forward pass
with torch.no_grad():
A_ : Optional[int] = model(**snake_case )
# verify the logits
A_ : List[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case )
A_ : int = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1e-4 ) )
| 300
| 1
|
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self :Optional[Any] , snake_case :Optional[Any]="</s>" , snake_case :Dict="<unk>" , snake_case :List[str]="<pad>" , snake_case :Optional[Any]=125 , snake_case :Any=None , **snake_case :Optional[Any] , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
A_ : Optional[int] = [f"<extra_id_{i}>" for i in range(snake_case )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
A_ : Union[str, Any] = len(set(filter(lambda snake_case : bool("extra_id" in str(snake_case ) ) , snake_case ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
" extra_ids tokens" )
A_ : Dict = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else pad_token
A_ : int = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else eos_token
A_ : List[Any] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else unk_token
super().__init__(
eos_token=snake_case , unk_token=snake_case , pad_token=snake_case , extra_ids=snake_case , additional_special_tokens=snake_case , **snake_case , )
A_ : str = extra_ids
A_ : Tuple = 2**8 # utf is 8 bits
# define special tokens dict
A_ : Dict[int, str] = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
A_ : List[str] = len(self.special_tokens_encoder )
A_ : str = len(snake_case )
for i, token in enumerate(snake_case ):
A_ : str = self.vocab_size + i - n
A_ : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :List[int] , snake_case :Optional[List[int]] = None , snake_case :bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(snake_case )) + [1]
return ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1]
def SCREAMING_SNAKE_CASE ( self :str , snake_case :List[int] ):
'''simple docstring'''
if len(snake_case ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :List[int] , snake_case :Optional[List[int]] = None ):
'''simple docstring'''
A_ : Dict = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :List[int] , snake_case :Optional[List[int]] = None ):
'''simple docstring'''
A_ : Tuple = self._add_eos_if_not_present(snake_case )
if token_ids_a is None:
return token_ids_a
else:
A_ : Union[str, Any] = self._add_eos_if_not_present(snake_case )
return token_ids_a + token_ids_a
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :str ):
'''simple docstring'''
A_ : str = [chr(i ) for i in text.encode("utf-8" )]
return tokens
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Union[str, Any] ):
'''simple docstring'''
if token in self.special_tokens_encoder:
A_ : Optional[Any] = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
A_ : str = self.added_tokens_encoder[token]
elif len(snake_case ) != 1:
A_ : str = self.unk_token_id
else:
A_ : List[Any] = ord(snake_case ) + self._num_special_tokens
return token_id
def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :Optional[Any] ):
'''simple docstring'''
if index in self.special_tokens_decoder:
A_ : Union[str, Any] = self.special_tokens_decoder[index]
else:
A_ : Optional[int] = chr(index - self._num_special_tokens )
return token
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :Any ):
'''simple docstring'''
A_ : Any = B""
for token in tokens:
if token in self.special_tokens_decoder:
A_ : List[str] = self.special_tokens_decoder[token].encode("utf-8" )
elif token in self.added_tokens_decoder:
A_ : Dict = self.special_tokens_decoder[token].encode("utf-8" )
elif token in self.special_tokens_encoder:
A_ : Tuple = token.encode("utf-8" )
elif token in self.added_tokens_encoder:
A_ : Optional[Any] = token.encode("utf-8" )
else:
A_ : Any = bytes([ord(snake_case )] )
bstring += tok_string
A_ : int = bstring.decode("utf-8" , errors="ignore" )
return string
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :str , snake_case :Optional[str] = None ):
'''simple docstring'''
return ()
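# Worked example (added): ByT5 tokenizes raw UTF-8 bytes offset by the three
# fixed special tokens (<pad>=0, </s>=1, <unk>=2), so "hi" -> bytes 104, 105
# -> ids 107, 108, plus the eos id 1 appended by build_inputs_with_special_tokens.
from transformers import ByT5Tokenizer

byt5_tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small")
print(byt5_tokenizer("hi").input_ids)  # [107, 108, 1]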
| 300
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = (DDPMScheduler,)
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , **snake_case :str ):
'''simple docstring'''
A_ : Dict = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**snake_case )
return config
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=snake_case , beta_end=snake_case )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=snake_case )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
self.check_over_configs(thresholding=snake_case )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=snake_case , prediction_type=snake_case , sample_max_value=snake_case , )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Tuple = self.scheduler_classes[0]
A_ : List[str] = self.get_scheduler_config()
A_ : List[str] = scheduler_class(**snake_case )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : int = self.scheduler_classes[0]
A_ : List[str] = self.get_scheduler_config()
A_ : int = scheduler_class(**snake_case )
A_ : Tuple = len(snake_case )
A_ : List[str] = self.dummy_model()
A_ : Optional[Any] = self.dummy_sample_deter
A_ : List[str] = torch.manual_seed(0 )
for t in reversed(range(snake_case ) ):
# 1. predict noise residual
A_ : Tuple = model(snake_case , snake_case )
# 2. predict previous mean of sample x_t-1
A_ : Dict = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A_ : Optional[int] = pred_prev_sample
A_ : Tuple = torch.sum(torch.abs(snake_case ) )
A_ : str = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : Optional[int] = self.scheduler_classes[0]
A_ : int = self.get_scheduler_config(prediction_type="v_prediction" )
A_ : List[str] = scheduler_class(**snake_case )
A_ : int = len(snake_case )
A_ : Dict = self.dummy_model()
A_ : str = self.dummy_sample_deter
A_ : Any = torch.manual_seed(0 )
for t in reversed(range(snake_case ) ):
# 1. predict noise residual
A_ : Optional[int] = model(snake_case , snake_case )
# 2. predict previous mean of sample x_t-1
A_ : Tuple = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A_ : List[str] = pred_prev_sample
A_ : Optional[Any] = torch.sum(torch.abs(snake_case ) )
A_ : List[str] = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : str = self.scheduler_classes[0]
A_ : Optional[Any] = self.get_scheduler_config()
A_ : Dict = scheduler_class(**snake_case )
A_ : Optional[int] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=snake_case )
A_ : Optional[int] = scheduler.timesteps
for i, timestep in enumerate(snake_case ):
if i == len(snake_case ) - 1:
A_ : str = -1
else:
A_ : List[str] = timesteps[i + 1]
A_ : Optional[int] = scheduler.previous_timestep(snake_case )
A_ : List[str] = prev_t.item()
self.assertEqual(snake_case , snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : int = self.get_scheduler_config()
A_ : Tuple = scheduler_class(**snake_case )
A_ : List[str] = [100, 87, 50, 51, 0]
with self.assertRaises(snake_case , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Any = self.scheduler_classes[0]
A_ : Union[str, Any] = self.get_scheduler_config()
A_ : Optional[int] = scheduler_class(**snake_case )
A_ : Union[str, Any] = [100, 87, 50, 1, 0]
A_ : Optional[int] = len(snake_case )
with self.assertRaises(snake_case , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=snake_case , timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Union[str, Any] = self.scheduler_classes[0]
A_ : Optional[Any] = self.get_scheduler_config()
A_ : Optional[int] = scheduler_class(**snake_case )
A_ : Optional[int] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
snake_case , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=snake_case )
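# A hedged usage sketch (added): the custom-timesteps behaviour exercised by
# the tests above, outside the test harness.
from diffusers import DDPMScheduler

ddpm_scheduler = DDPMScheduler(num_train_timesteps=1_000)
ddpm_scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])
print(ddpm_scheduler.timesteps)  # tensor([100,  87,  50,   1,   0])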
| 300
| 1
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = '''t5'''
__UpperCamelCase = ['''past_key_values''']
__UpperCamelCase = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self :Optional[Any] , snake_case :List[Any]=32_128 , snake_case :str=512 , snake_case :List[str]=64 , snake_case :int=2_048 , snake_case :List[Any]=6 , snake_case :Tuple=None , snake_case :Optional[Any]=8 , snake_case :Optional[int]=32 , snake_case :Tuple=128 , snake_case :Optional[Any]=0.1 , snake_case :int=1e-6 , snake_case :str=1.0 , snake_case :Dict="relu" , snake_case :Optional[int]=True , snake_case :Union[str, Any]=True , snake_case :Optional[int]=0 , snake_case :Any=1 , **snake_case :str , ):
'''simple docstring'''
A_ : List[str] = vocab_size
A_ : Tuple = d_model
A_ : Any = d_kv
A_ : int = d_ff
A_ : Optional[int] = num_layers
A_ : Any = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
A_ : Optional[Any] = num_heads
A_ : Dict = relative_attention_num_buckets
A_ : List[Any] = relative_attention_max_distance
A_ : int = dropout_rate
A_ : Optional[Any] = layer_norm_epsilon
A_ : Tuple = initializer_factor
A_ : Union[str, Any] = feed_forward_proj
A_ : Dict = use_cache
A_ : Any = self.feed_forward_proj.split("-" )
A_ : Any = act_info[-1]
A_ : List[str] = act_info[0] == "gated"
if len(snake_case ) > 1 and act_info[0] != "gated" or len(snake_case ) > 2:
raise ValueError(
f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
A_ : Tuple = "gelu_new"
super().__init__(
pad_token_id=snake_case , eos_token_id=snake_case , is_encoder_decoder=snake_case , **snake_case , )
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : Optional[Any] = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
A_ : Union[str, Any] = "past_encoder_sequence + sequence"
A_ : Union[str, Any] = {0: "batch"}
A_ : List[str] = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
A_ : Any = {0: "batch", 1: "decoder_sequence"}
A_ : str = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(snake_case , direction="inputs" )
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
return 13
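# Worked example (added), assuming this is transformers' T5Config: the
# feed_forward_proj string is split on "-", so "gated-gelu" selects a gated
# feed-forward block and is remapped to the "gelu_new" activation for
# backwards compatibility, while plain "relu" keeps a single dense projection.
from transformers import T5Config

gated_config = T5Config(feed_forward_proj="gated-gelu")
print(gated_config.dense_act_fn, gated_config.is_gated_act)  # gelu_new True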
| 300
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[int] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowerCAmelCase : int = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
for attribute in key.split("." ):
A_ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
A_ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
A_ : Tuple = hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
A_ : Optional[int] = value
elif weight_type == "weight_g":
A_ : Optional[int] = value
elif weight_type == "weight_v":
A_ : Any = value
elif weight_type == "bias":
A_ : str = value
else:
A_ : Any = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights_wavaveca( fairseq_model , hf_model ):
A_ : Optional[Any] = []
A_ : Any = fairseq_model.state_dict()
A_ : Union[str, Any] = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
A_ : str = None
for name, value in fairseq_dict.items():
A_ : Tuple = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , )
A_ : Optional[Any] = True
elif name.split("." )[0] == "proj":
A_ : Dict = fairseq_model.proj
A_ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
A_ : int = True
if "*" in mapped_key:
A_ : Optional[Any] = name.split(_lowerCAmelCase )[0].split("." )[-2]
A_ : int = mapped_key.replace("*" , _lowerCAmelCase )
if "weight_g" in name:
A_ : List[Any] = "weight_g"
elif "weight_v" in name:
A_ : List[Any] = "weight_v"
elif "bias" in name:
A_ : Dict = "bias"
elif "weight" in name:
A_ : List[Any] = "weight"
else:
A_ : Dict = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"Unused weights: {unused_weights}" )
return proj_weight
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
A_ : Any = full_name.split("conv_layers." )[-1]
A_ : Optional[int] = name.split("." )
A_ : Optional[Any] = int(items[0] )
A_ : Union[str, Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
A_ : List[Any] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
A_ : int = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
A_ : List[Any] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
A_ : Tuple = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_lowerCAmelCase )
def make_linear_from_emb( emb ):
A_ , A_ : List[str] = emb.weight.shape
A_ : Optional[int] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase )
A_ : List[Any] = emb.weight.data
return lin_layer
def create_vocab_dict( _lowerCAmelCase : str ) -> dict:
with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f:
A_ : int = f.readlines()
A_ : Dict = [line.split(" " )[0] for line in lines]
A_ : Tuple = len(_lowerCAmelCase )
A_ : Union[str, Any] = {
"<s>": 0,
"<pad>": 1,
"</s>": 2,
"<unk>": 3,
}
vocab_dict.update(dict(zip(_lowerCAmelCase , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , dict_path , encoder_config_path , decoder_config_path , vocab_size , num_decoder_layers , ):
A_ : Optional[int] = WavaVecaConfig.from_pretrained(_lowerCAmelCase )
A_ : str = SpeechaTextaConfig.from_pretrained(
_lowerCAmelCase , vocab_size=_lowerCAmelCase , decoder_layers=_lowerCAmelCase , do_stable_layer_norm=_lowerCAmelCase )
A_ : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
A_ , A_ , A_ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
A_ : Union[str, Any] = model[0].eval()
# set weights for wav2vec2 encoder
A_ : Tuple = WavaVecaModel(_lowerCAmelCase )
A_ : str = recursively_load_weights_wavaveca(model.encoder , _lowerCAmelCase )
A_ : Tuple = SpeechaTextaForCausalLM(_lowerCAmelCase )
A_ , A_ : List[str] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowerCAmelCase )
# set output linear layer
unexpected_keys.remove("embed_out" )
A_ : Union[str, Any] = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
A_ : str = SpeechEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
A_ : Optional[Any] = False
# add projection layer
A_ : Optional[Any] = nn.Parameter(projection_layer.weight )
A_ : int = nn.Parameter(projection_layer.bias )
A_ : str = create_vocab_dict(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "vocab.json" ) , "w" ) as fp:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
A_ : Any = SpeechaTextaTokenizer(os.path.join(_lowerCAmelCase , "vocab.json" ) )
tokenizer.save_pretrained(_lowerCAmelCase )
A_ : Optional[int] = hf_wavavec.config.to_dict()
A_ : int = tokenizer.pad_token_id
A_ : List[str] = tokenizer.bos_token_id
A_ : List[str] = tokenizer.eos_token_id
A_ : List[str] = "speech_to_text_2"
A_ : Tuple = "wav2vec2"
A_ : str = SpeechEncoderDecoderConfig.from_dict(_lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
feature_extractor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=10_224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
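# Illustrative invocation (added; the script name and all paths are
# placeholders, and the fairseq checkpoint and dict must exist locally):
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path /path/to/checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-2-speech2text2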
| 300
| 1
|
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class __magic_name__ ( nn.Module ):
"""simple docstring"""
    def __init__( self , dim: int , num_attention_heads: int , attention_head_dim: int , dropout=0.0 , cross_attention_dim: Optional[int] = None , activation_fn: str = "geglu" , num_embeds_ada_norm: Optional[int] = None , attention_bias: bool = False , only_cross_attention: bool = False , double_self_attention: bool = False , upcast_attention: bool = False , norm_elementwise_affine: bool = True , norm_type: str = "layer_norm" , final_dropout: bool = False , ):
        '''simple docstring'''
        super().__init__()
        self.only_cross_attention = only_cross_attention
        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." )
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim , num_embeds_ada_norm )
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim , num_embeds_ada_norm )
        else:
            self.norm1 = nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
        self.attn1 = Attention(
            query_dim=dim , heads=num_attention_heads , dim_head=attention_head_dim , dropout=dropout , bias=attention_bias , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=upcast_attention , )
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim , num_embeds_ada_norm )
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
            )
            self.attn2 = Attention(
                query_dim=dim , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=num_attention_heads , dim_head=attention_head_dim , dropout=dropout , bias=attention_bias , upcast_attention=upcast_attention , ) # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None
        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
        self.ff = FeedForward(dim , dropout=dropout , activation_fn=activation_fn , final_dropout=final_dropout )
        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0
    def SCREAMING_SNAKE_CASE ( self , chunk_size: Optional[int] , dim: int ):
        '''simple docstring'''
        self._chunk_size = chunk_size
        self._chunk_dim = dim
    def SCREAMING_SNAKE_CASE ( self , hidden_states: torch.FloatTensor , attention_mask: Optional[torch.FloatTensor] = None , encoder_hidden_states: Optional[torch.FloatTensor] = None , encoder_attention_mask: Optional[torch.FloatTensor] = None , timestep: Optional[torch.LongTensor] = None , cross_attention_kwargs: Dict[str, Any] = None , class_labels: Optional[torch.LongTensor] = None , ):
        '''simple docstring'''
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states , timestep )
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states , gate_msa , shift_mlp , scale_mlp , gate_mlp = self.norm1(
                hidden_states , timestep , class_labels , hidden_dtype=hidden_states.dtype )
        else:
            norm_hidden_states = self.norm1(hidden_states )
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=attention_mask , **cross_attention_kwargs , )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1 ) * attn_output
        hidden_states = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states , timestep ) if self.use_ada_layer_norm else self.norm2(hidden_states )
            )
            attn_output = self.attn2(
                norm_hidden_states , encoder_hidden_states=encoder_hidden_states , attention_mask=encoder_attention_mask , **cross_attention_kwargs , )
            hidden_states = attn_output + hidden_states
        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states )
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`." )
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice ) for hid_slice in norm_hidden_states.chunk(num_chunks , dim=self._chunk_dim )] , dim=self._chunk_dim , )
        else:
            ff_output = self.ff(norm_hidden_states )
        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1 ) * ff_output
        hidden_states = ff_output + hidden_states
        return hidden_states
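# A minimal sketch of the feed-forward chunking applied above (an illustrative
# helper, not part of the original module; `ff` can be any per-token module):
# splitting the hidden states along one dimension trades a single large matmul
# for several smaller ones, lowering peak activation memory with identical output.
def _chunked_feed_forward_sketch(ff: nn.Module , hidden_states: torch.Tensor , chunk_size: int , dim: int = 1 ) -> torch.Tensor:
    if hidden_states.shape[dim] % chunk_size != 0:
        raise ValueError("the chunked dimension must be divisible by chunk_size" )
    num_chunks = hidden_states.shape[dim] // chunk_size
    # run each chunk through the feed-forward module independently, then reassemble
    return torch.cat([ff(chunk ) for chunk in hidden_states.chunk(num_chunks , dim=dim )] , dim=dim )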
class __magic_name__ ( nn.Module ):
"""simple docstring"""
    def __init__( self , dim: int , dim_out: Optional[int] = None , mult: int = 4 , dropout: float = 0.0 , activation_fn: str = "geglu" , final_dropout: bool = False , ):
        '''simple docstring'''
        super().__init__()
        inner_dim = int(dim * mult )
        dim_out = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            act_fn = GELU(dim , inner_dim )
        elif activation_fn == "gelu-approximate":
            act_fn = GELU(dim , inner_dim , approximate="tanh" )
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim , inner_dim )
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim , inner_dim )
        self.net = nn.ModuleList([] )
        # project in
        self.net.append(act_fn )
        # project dropout
        self.net.append(nn.Dropout(dropout ) )
        # project out
        self.net.append(nn.Linear(inner_dim , dim_out ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout ) )
    def SCREAMING_SNAKE_CASE ( self , hidden_states ):
        '''simple docstring'''
        for module in self.net:
            hidden_states = module(hidden_states )
return hidden_states
class __magic_name__ ( nn.Module ):
"""simple docstring"""
    def __init__( self , dim_in: int , dim_out: int , approximate: str = "none" ):
        '''simple docstring'''
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out )
        self.approximate = approximate
    def SCREAMING_SNAKE_CASE ( self , gate ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(snake_case , approximate=self.approximate )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) , approximate=self.approximate ).to(dtype=gate.dtype )
    def SCREAMING_SNAKE_CASE ( self , hidden_states ):
        '''simple docstring'''
        hidden_states = self.proj(hidden_states )
        hidden_states = self.gelu(hidden_states )
return hidden_states
class __magic_name__ ( nn.Module ):
"""simple docstring"""
    def __init__( self , dim_in: int , dim_out: int ):
        '''simple docstring'''
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out * 2 )
    def SCREAMING_SNAKE_CASE ( self , gate ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(snake_case )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )
    def SCREAMING_SNAKE_CASE ( self , hidden_states ):
        '''simple docstring'''
        hidden_states , gate = self.proj(hidden_states ).chunk(2 , dim=-1 )
        return hidden_states * self.gelu(gate )
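# GEGLU in isolation (hypothetical dimensions, plain torch): one linear layer
# produces both a value half and a gate half, and the output is their product:
#   proj = nn.Linear(320 , 2 * 1280 )
#   value , gate = proj(x ).chunk(2 , dim=-1 )
#   out = value * F.gelu(gate )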
class __magic_name__ ( nn.Module ):
"""simple docstring"""
    def __init__( self , dim_in: int , dim_out: int ):
        '''simple docstring'''
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out )
    def SCREAMING_SNAKE_CASE ( self , x ):
        '''simple docstring'''
        x = self.proj(x )
return x * torch.sigmoid(1.702 * x )
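# The constant 1.702 makes x * sigmoid(1.702 * x) a close elementwise
# approximation of GELU (Hendrycks & Gimpel, "Gaussian Error Linear Units"),
# trading the exact erf-based form for a cheaper sigmoid evaluation.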
class __magic_name__ ( nn.Module ):
"""simple docstring"""
    def __init__( self , embedding_dim: int , num_embeddings: int ):
        '''simple docstring'''
        super().__init__()
        self.emb = nn.Embedding(num_embeddings , embedding_dim )
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim , embedding_dim * 2 )
        self.norm = nn.LayerNorm(embedding_dim , elementwise_affine=False )
    def SCREAMING_SNAKE_CASE ( self , x , timestep ):
        '''simple docstring'''
        emb = self.linear(self.silu(self.emb(timestep ) ) )
        scale , shift = torch.chunk(emb , 2 )
        x = self.norm(x ) * (1 + scale) + shift
return x
class __magic_name__ ( nn.Module ):
"""simple docstring"""
    def __init__( self , embedding_dim: int , num_embeddings: int ):
        '''simple docstring'''
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings , embedding_dim )
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim , 6 * embedding_dim , bias=True )
        self.norm = nn.LayerNorm(embedding_dim , elementwise_affine=False , eps=1e-6 )
    def SCREAMING_SNAKE_CASE ( self , x , timestep , class_labels , hidden_dtype=None ):
        '''simple docstring'''
        emb = self.linear(self.silu(self.emb(timestep , class_labels , hidden_dtype=hidden_dtype ) ) )
        shift_msa , scale_msa , gate_msa , shift_mlp , scale_mlp , gate_mlp = emb.chunk(6 , dim=1 )
        x = self.norm(x ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class __magic_name__ ( nn.Module ):
"""simple docstring"""
    def __init__( self , embedding_dim: int , out_dim: int , num_groups: int , act_fn: Optional[str] = None , eps: float = 1e-5 ):
        '''simple docstring'''
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn )
        self.linear = nn.Linear(embedding_dim , out_dim * 2 )
    def SCREAMING_SNAKE_CASE ( self , x , emb ):
        '''simple docstring'''
        if self.act:
            emb = self.act(emb )
        emb = self.linear(emb )
        emb = emb[:, :, None, None]
        scale , shift = emb.chunk(2 , dim=1 )
        x = F.group_norm(x , self.num_groups , eps=self.eps )
        x = x * (1 + scale) + shift
return x
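# AdaGroupNorm above is a FiLM-style modulation: the conditioning embedding is
# projected to per-channel (scale, shift) pairs applied after group norm,
#   x = F.group_norm(x , num_groups ) * (1 + scale ) + shift
# so a zero embedding leaves the normalized activations unchanged.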
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class __magic_name__ :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , range_bbox=1_000 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = LayoutLMConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self :str , snake_case :Dict , snake_case :Union[str, Any] , snake_case :int , snake_case :int , snake_case :Union[str, Any] , snake_case :Tuple , snake_case :Optional[int] , snake_case :List[Any] ):
'''simple docstring'''
A_ : Any = TFLayoutLMModel(config=snake_case )
A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case )
A_ : str = model(snake_case , snake_case , token_type_ids=snake_case )
A_ : List[Any] = model(snake_case , snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Any , snake_case :List[Any] , snake_case :List[str] , snake_case :Optional[Any] , snake_case :Dict , snake_case :Any , snake_case :Union[str, Any] , snake_case :List[Any] ):
'''simple docstring'''
A_ : Optional[int] = TFLayoutLMForMaskedLM(config=snake_case )
A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :Dict , snake_case :Tuple , snake_case :Tuple , snake_case :List[str] , snake_case :Tuple , snake_case :str , snake_case :Optional[int] , snake_case :Any ):
'''simple docstring'''
A_ : Union[str, Any] = self.num_labels
A_ : int = TFLayoutLMForSequenceClassification(config=snake_case )
A_ : Optional[int] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict , snake_case :str , snake_case :Optional[Any] , snake_case :int , snake_case :Any , snake_case :Tuple , snake_case :List[str] , snake_case :Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = self.num_labels
A_ : str = TFLayoutLMForTokenClassification(config=snake_case )
A_ : Union[str, Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[str] , snake_case :Optional[int] , snake_case :Union[str, Any] , snake_case :List[Any] , snake_case :int , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ):
'''simple docstring'''
A_ : Optional[Any] = TFLayoutLMForQuestionAnswering(config=snake_case )
A_ : List[Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : int = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
__UpperCamelCase = (
{
'''feature-extraction''': TFLayoutLMModel,
'''fill-mask''': TFLayoutLMForMaskedLM,
'''text-classification''': TFLayoutLMForSequenceClassification,
'''token-classification''': TFLayoutLMForTokenClassification,
'''zero-shot''': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = True
__UpperCamelCase = 10
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : Tuple = TFLayoutLMModelTester(self )
A_ : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
@slow
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[str] = TFLayoutLMModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@unittest.skip("Onnx compliancy broke with TF 2.10" )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
pass
def __snake_case ( ) -> Optional[Any]:
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
A_ : int = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231
A_ : int = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
A_ : Union[str, Any] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
A_ : List[Any] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
A_ : Tuple = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
A_ : str = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" )
A_ , A_ , A_ , A_ , A_ : Tuple = prepare_layoutlm_batch_inputs()
# forward pass
A_ : Tuple = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case )
# test the sequence output on [0, :3, :3]
A_ : List[Any] = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case , atol=1e-3 ) )
# test the pooled output on [1, :3]
A_ : Optional[Any] = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , snake_case , atol=1e-3 ) )
@slow
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Union[str, Any] = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 )
A_ , A_ , A_ , A_ , A_ : Any = prepare_layoutlm_batch_inputs()
# forward pass
A_ : Dict = model(
input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
A_ : List[str] = outputs.loss
A_ : Union[str, Any] = (2,)
self.assertEqual(loss.shape , snake_case )
# test the shape of the logits
A_ : Tuple = outputs.logits
A_ : Tuple = (2, 2)
self.assertEqual(logits.shape , snake_case )
@slow
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : int = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 )
A_ , A_ , A_ , A_ , A_ : Optional[int] = prepare_layoutlm_batch_inputs()
# forward pass
A_ : Union[str, Any] = model(
input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
# test the shape of the logits
A_ : Dict = outputs.logits
A_ : List[Any] = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , snake_case )
@slow
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Optional[Any] = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" )
A_ , A_ , A_ , A_ , A_ : str = prepare_layoutlm_batch_inputs()
# forward pass
A_ : Union[str, Any] = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case )
# test the shape of the logits
A_ : Union[str, Any] = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , snake_case )
self.assertEqual(outputs.end_logits.shape , snake_case )
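# These tests can be run in isolation with pytest; the path below assumes the
# usual layout of a transformers checkout, and RUN_SLOW=1 enables the @slow
# integration tests:
#   RUN_SLOW=1 pytest tests/models/layoutlm/test_modeling_tf_layoutlm.py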
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str]=False ) -> Any:
A_ : Optional[int] = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A_ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def __snake_case ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Any=False ) -> Any:
for i in range(config.num_hidden_layers ):
if base_model:
A_ : Tuple = ""
else:
A_ : str = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ : Any = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
A_ : List[str] = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
A_ : List[str] = in_proj_weight[
: config.hidden_size, :
]
A_ : int = in_proj_bias[: config.hidden_size]
A_ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
A_ : List[Any] = in_proj_bias[-config.hidden_size :]
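# Sketch of the fused-projection split performed above: timm stores q/k/v as a
# single (3 * hidden_size, hidden_size) matrix, and the three row blocks are
# carved out in order (a hidden_size of 768 is a hypothetical value):
#   qkv_weight = torch.randn(3 * 768 , 768 )
#   q , k , v = qkv_weight[:768] , qkv_weight[768 : 2 * 768] , qkv_weight[2 * 768 :]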
def __snake_case ( _lowerCAmelCase : Any ) -> Tuple:
A_ : Union[str, Any] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] ) -> Union[str, Any]:
A_ : Dict = dct.pop(_lowerCAmelCase )
A_ : Optional[Any] = val
def __snake_case ( ) -> str:
A_ : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
A_ : List[str] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name , pytorch_dump_folder_path , push_to_hub=False ):
    backbone_config = BitConfig(
        global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=384 , num_labels=1000 )
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
# create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
    processor = ViTHybridImageProcessor(
        do_resize=True , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
# verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
print("Predicted class:" , logits.argmax(-1 ).item() )
if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1e-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"Saving processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(f"Pushing model and processor to the hub {vit_name}" )
model.push_to_hub(f"ybelkada/{vit_name}" )
processor.push_to_hub(f"ybelkada/{vit_name}" )
if __name__ == "__main__":
_lowerCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
_lowerCAmelCase : int = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
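# Example invocation (the script filename and output directory are assumptions;
# the default model name matches the argument definition above):
#   python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base-bit-384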
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
_lowerCAmelCase : Optional[int] = '''
Human: <<task>>
Assistant: '''
_lowerCAmelCase : int = '''huggingface-tools/default-prompts'''
_lowerCAmelCase : Any = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict="run" ) -> List[Any]:
if prompt_or_repo_id is None:
A_ : Optional[int] = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , _lowerCAmelCase ) is not None:
return prompt_or_repo_id
A_ : Optional[Any] = cached_file(
_lowerCAmelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f:
return f.read()
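# Usage sketch for the helper above (arguments are hypothetical): passing None
# falls back to the default prompt repo, a string containing whitespace is used
# verbatim as the prompt, and anything else is fetched as a dataset repo id.
#   template = __snake_case(None , "my-agent" , "run" )
#   prompt = template.replace("<<task>>" , "Translate this sentence to French." )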
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Tuple = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = SpeechTaTokenizer
__UpperCamelCase = False
__UpperCamelCase = True
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(_lowerCAmelCase )
        mask_token = AddedToken("<mask>" , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Any ):
'''simple docstring'''
A_ : List[str] = "this is a test"
A_ : str = "this is a test"
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self :str , snake_case :str , snake_case :Union[str, Any]=False , snake_case :str=20 , snake_case :str=5 ):
'''simple docstring'''
A_ , A_ : Union[str, Any] = self.get_input_output_texts(snake_case )
A_ : Optional[int] = tokenizer.encode(snake_case , add_special_tokens=snake_case )
A_ : Optional[int] = tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case )
return text, ids
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-4] , "œ" )
self.assertEqual(vocab_keys[-2] , "<mask>" )
self.assertEqual(vocab_keys[-1] , "<ctc_blank>" )
self.assertEqual(len(snake_case ) , 81 )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
A_ : Optional[int] = self.get_tokenizers(do_lower_case=snake_case )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
A_ : List[str] = tokenizer.vocab_size
A_ : Optional[int] = len(snake_case )
self.assertNotEqual(snake_case , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
A_ : str = ["aaaaa bbbbbb", "cccccccccdddddddd"]
A_ : Dict = tokenizer.add_tokens(snake_case )
A_ : List[Any] = tokenizer.vocab_size
A_ : Optional[Any] = len(snake_case )
self.assertNotEqual(snake_case , 0 )
self.assertEqual(snake_case , snake_case )
self.assertEqual(snake_case , len(snake_case ) )
self.assertEqual(snake_case , all_size + len(snake_case ) )
A_ : Union[str, Any] = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=snake_case )
self.assertGreaterEqual(len(snake_case ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
A_ : Any = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
A_ : Any = tokenizer.add_special_tokens(snake_case )
A_ : Tuple = tokenizer.vocab_size
A_ : Tuple = len(snake_case )
self.assertNotEqual(snake_case , 0 )
self.assertEqual(snake_case , snake_case )
self.assertEqual(snake_case , len(snake_case ) )
self.assertEqual(snake_case , all_size_a + len(snake_case ) )
A_ : str = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=snake_case )
self.assertGreaterEqual(len(snake_case ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Any = self.get_tokenizer()
A_ : List[str] = tokenizer.tokenize("This is a test" )
# fmt: off
self.assertListEqual(snake_case , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
A_ : Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
snake_case , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
A_ : Any = tokenizer.convert_tokens_to_ids(snake_case )
# fmt: off
self.assertListEqual(snake_case , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
A_ : Dict = tokenizer.convert_ids_to_tokens(snake_case )
self.assertListEqual(
snake_case , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Dict = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
A_ : str = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=snake_case , )
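# Like the other tokenizer suites, this file runs under pytest; the path is an
# assumption about the checkout layout, and the @slow integration test needs
# RUN_SLOW=1:
#   RUN_SLOW=1 pytest tests/models/speecht5/test_tokenization_speecht5.py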
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using Heap's iterative algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(n: int, arr: list):
        # c encodes the loop state of the recursive formulation of Heap's algorithm
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[i], arr[0] = arr[0], arr[i]  # even i: swap with the first element
                else:
                    arr[i], arr[c[i]] = arr[c[i]], arr[i]  # odd i: swap with position c[i]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
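# Example (traced by hand): heaps([1, 2, 3]) returns the six permutations in
# Heap order:
#   [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]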
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self :List[str] , **snake_case :Tuple ):
'''simple docstring'''
super().__init__(**snake_case )
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self , "vision" )
self.check_model_type(snake_case )
    def __call__( self , image: Union[str, "Image.Image", List[Dict[str, Any]]] , candidate_labels: Union[str, List[str]] = None , **kwargs , ):
        '''simple docstring'''
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries" )
        if isinstance(image , (str, Image.Image) ):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def SCREAMING_SNAKE_CASE ( self , **kwargs ):
        '''simple docstring'''
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
return {}, {}, postprocess_params
    def SCREAMING_SNAKE_CASE ( self , inputs ):
        '''simple docstring'''
        image = load_image(inputs["image"] )
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels , str ):
            candidate_labels = candidate_labels.split("," )
        # int32 restored from the mangled "intaa"; the exact integer width is an assumption
        target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
        for i, candidate_label in enumerate(candidate_labels ):
            text_inputs = self.tokenizer(candidate_label , return_tensors=self.framework )
            image_features = self.image_processor(image , return_tensors=self.framework )
yield {
"is_last": i == len(snake_case ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
    def SCREAMING_SNAKE_CASE ( self , model_inputs ):
        '''simple docstring'''
        target_size = model_inputs.pop("target_size" )
        candidate_label = model_inputs.pop("candidate_label" )
        is_last = model_inputs.pop("is_last" )
        outputs = self.model(**model_inputs )
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
    def SCREAMING_SNAKE_CASE ( self , model_outputs , threshold=0.1 , top_k=None ):
        '''simple docstring'''
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output )
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output["target_size"] )[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0] )
                result = {"score": score, "label": label, "box": box}
                results.append(result )
        results = sorted(results , key=lambda x : x["score"] , reverse=True )
        if top_k:
            results = results[:top_k]
return results
    def SCREAMING_SNAKE_CASE ( self , box: "torch.Tensor" ):
        '''simple docstring'''
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." )
        xmin , ymin , xmax , ymax = box.int().tolist()
        bbox = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
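# --- usage sketch (hedged): this pipeline is typically driven through the
# public `pipeline` factory; the checkpoint name and image path below are
# illustrative, not prescribed by the code above ---
# from transformers import pipeline
# detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
# preds = detector("street.jpg", candidate_labels=["car", "bicycle", "person"], threshold=0.1)
# each prediction is a dict whose "box" has the shape produced by `_get_bounding_box`:
#   {"score": 0.95, "label": "car", "box": {"xmin": 10, "ymin": 22, "xmax": 180, "ymax": 140}}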
| 300
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
_lowerCAmelCase : int = logging.get_logger(__name__)
_lowerCAmelCase : Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCAmelCase : List[Any] = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
_lowerCAmelCase : Any = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
__UpperCamelCase = RobertaTokenizer
def __init__( self :Dict , snake_case :List[str]=None , snake_case :List[Any]=None , snake_case :Union[str, Any]=None , snake_case :List[str]="replace" , snake_case :Tuple="<s>" , snake_case :Union[str, Any]="</s>" , snake_case :str="</s>" , snake_case :Union[str, Any]="<s>" , snake_case :int="<unk>" , snake_case :Tuple="<pad>" , snake_case :List[str]="<mask>" , snake_case :Any=False , snake_case :Union[str, Any]=True , **snake_case :Optional[int] , ):
'''simple docstring'''
super().__init__(
snake_case , snake_case , tokenizer_file=snake_case , errors=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , **snake_case , )
A_ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case ) != add_prefix_space:
A_ : Dict = getattr(snake_case , pre_tok_state.pop("type" ) )
A_ : Optional[int] = add_prefix_space
A_ : int = pre_tok_class(**snake_case )
A_ : Optional[int] = add_prefix_space
A_ : Optional[int] = "post_processor"
A_ : Dict = getattr(self.backend_tokenizer , snake_case , snake_case )
if tokenizer_component_instance:
A_ : Dict = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
A_ : List[Any] = tuple(state["sep"] )
if "cls" in state:
A_ : Optional[Any] = tuple(state["cls"] )
A_ : Tuple = False
if state.get("add_prefix_space" , snake_case ) != add_prefix_space:
A_ : List[Any] = add_prefix_space
A_ : Optional[int] = True
if state.get("trim_offsets" , snake_case ) != trim_offsets:
A_ : List[str] = trim_offsets
A_ : Any = True
if changes_to_apply:
A_ : Optional[Any] = getattr(snake_case , state.pop("type" ) )
A_ : Any = component_class(**snake_case )
setattr(self.backend_tokenizer , snake_case , snake_case )
@property
    def mask_token( self :List[Any] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token( self :Any , snake_case :Dict ):
'''simple docstring'''
A_ : Dict = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else value
A_ : Any = value
def SCREAMING_SNAKE_CASE ( self :Dict , *snake_case :Tuple , **snake_case :Union[str, Any] ):
'''simple docstring'''
A_ : Any = kwargs.get("is_split_into_words" , snake_case )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE ( self :List[str] , *snake_case :str , **snake_case :Union[str, Any] ):
'''simple docstring'''
A_ : Any = kwargs.get("is_split_into_words" , snake_case )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :str , snake_case :Optional[str] = None ):
'''simple docstring'''
A_ : str = self._tokenizer.model.save(snake_case , name=snake_case )
return tuple(snake_case )
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :List[str] , snake_case :Optional[Any]=None ):
'''simple docstring'''
A_ : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :List[int] , snake_case :Optional[List[int]] = None ):
'''simple docstring'''
A_ : Any = [self.sep_token_id]
A_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
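# --- illustrative note: the two helpers above give RoBERTa's special-token
# layout. A single sequence becomes `<s> A </s>`; a pair becomes
# `<s> A </s></s> B </s>`; and the token_type_ids are all zeros in both
# cases, since RoBERTa does not use segment embeddings.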
| 300
| 1
|
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_lowerCAmelCase : int = logging.getLogger(__name__)
def __snake_case ( _lowerCAmelCase : torch.nn.Module , _lowerCAmelCase : BnbQuantizationConfig , _lowerCAmelCase : Union[str, os.PathLike] = None , _lowerCAmelCase : Optional[Dict[str, Union[int, str, torch.device]]] = None , _lowerCAmelCase : Optional[List[str]] = None , _lowerCAmelCase : Optional[Dict[Union[int, str], Union[int, str]]] = None , _lowerCAmelCase : Optional[Union[str, os.PathLike]] = None , _lowerCAmelCase : bool = False , ) -> List[Any]:
A_ : List[Any] = bnb_quantization_config.load_in_abit
A_ : List[Any] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
" make sure you have the latest version of `bitsandbytes` installed." )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
"make sure you have the latest version of `bitsandbytes` installed." )
A_ : Optional[int] = []
# custom device map
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(device_map.keys() ) > 1:
A_ : Optional[Any] = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
A_ : int = get_keys_to_not_convert(_lowerCAmelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_lowerCAmelCase )
A_ : List[str] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
A_ : List[str] = []
A_ : Optional[Any] = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_lowerCAmelCase )
# compatibility with peft
A_ : List[Any] = load_in_abit
A_ : str = load_in_abit
A_ : Any = get_parameter_device(_lowerCAmelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager." )
A_ : Optional[int] = replace_with_bnb_layers(_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
# convert param to the right dtype
A_ : Optional[int] = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
A_ : Tuple = name.replace(".weight" , "" ).replace(".bias" , "" )
A_ : List[str] = getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_lowerCAmelCase ):
param.to(_lowerCAmelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
logger.info(
f"The model device type is {model_device.type}. However, cuda is needed for quantization."
"We move the model to cuda." )
return model
elif weights_location is None:
raise RuntimeError(
f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} " )
else:
with init_empty_weights():
A_ : List[str] = replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
A_ : Tuple = get_quantized_model_device_map(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , max_memory=_lowerCAmelCase , no_split_module_classes=_lowerCAmelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
A_ : Optional[int] = True
A_ : Optional[int] = any(x in list(device_map.values() ) for x in ["cpu", "disk"] )
load_checkpoint_in_model(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=_lowerCAmelCase , offload_state_dict=_lowerCAmelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_lowerCAmelCase , device_map=_lowerCAmelCase , offload_dir=_lowerCAmelCase )
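# --- usage sketch (hedged): a typical call into the entry point above via the
# public accelerate API; the model class and checkpoint path are illustrative ---
# from accelerate import init_empty_weights
# from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
# with init_empty_weights():
#     empty_model = MyModel(config)  # hypothetical model class
# bnb_config = BnbQuantizationConfig(load_in_8bit=True)
# model = load_and_quantize_model(empty_model, bnb_config, weights_location="path/to/checkpoint")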
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Any=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=None ) -> Dict:
if device_map is None:
if torch.cuda.is_available():
A_ : Union[str, Any] = {"": torch.cuda.current_device()}
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`." )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'." )
A_ : Union[str, Any] = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
A_ : Any = {}
A_ : List[Any] = special_dtypes
A_ : Union[str, Any] = no_split_module_classes
A_ : List[Any] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
A_ : str = get_balanced_memory(
_lowerCAmelCase , low_zero=(device_map == "balanced_low_0") , max_memory=_lowerCAmelCase , **_lowerCAmelCase , )
A_ : Dict = max_memory
A_ : Any = infer_auto_device_map(_lowerCAmelCase , **_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# check if don't have any quantized module on the cpu
A_ : List[str] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
A_ : Optional[int] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " )
else:
logger.info(
"Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" )
del device_map_without_some_modules
return device_map
def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : int=None ) -> Union[str, Any]:
if modules_to_not_convert is None:
A_ : Dict = []
A_ , A_ : List[Any] = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : str=None , _lowerCAmelCase : List[Any]=None , ) -> Dict:
A_ : Union[str, Any] = False
for name, module in model.named_children():
if current_key_name is None:
A_ : List[Any] = []
current_key_name.append(_lowerCAmelCase )
if isinstance(_lowerCAmelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
A_ : int = ".".join(_lowerCAmelCase )
A_ : int = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
A_ : Union[str, Any] = False
break
if proceed:
            # Load bnb module with empty weight and replace the `nn.Linear` module
if bnb_quantization_config.load_in_abit:
A_ : str = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_lowerCAmelCase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
A_ : int = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("load_in_8bit and load_in_4bit can't be both False" )
A_ : Optional[int] = module.weight.data
if module.bias is not None:
A_ : List[Any] = module.bias.data
bnb_module.requires_grad_(_lowerCAmelCase )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A_ : Optional[int] = True
if len(list(module.children() ) ) > 0:
A_ , A_ : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A_ : Dict = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
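# --- illustrative sketch (not part of the utilities above): the same recursive
# replacement pattern on a toy model, with a placeholder class standing in for
# the real bitsandbytes layers ---
class _FakeQuantLinear(nn.Linear):
    """Stand-in for a quantized linear layer; illustration only."""
def _replace_linears_sketch(module: nn.Module) -> nn.Module:
    for name, child in module.named_children():
        if isinstance(child, nn.Linear) and not isinstance(child, _FakeQuantLinear):
            setattr(module, name, _FakeQuantLinear(child.in_features, child.out_features, child.bias is not None))
        else:
            _replace_linears_sketch(child)  # recurse into nested containers
    return module
# usage: _replace_linears_sketch(nn.Sequential(nn.Linear(4, 8), nn.Sequential(nn.Linear(8, 2))))
# swaps both linears, including the one inside the nested Sequential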
def __snake_case ( _lowerCAmelCase : Dict ) -> Any:
# Create a copy of the model
with init_empty_weights():
A_ : Optional[Any] = deepcopy(_lowerCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
A_ : str = find_tied_parameters(_lowerCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A_ : Dict = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
A_ : Any = sum(_lowerCAmelCase , [] )
A_ : List[Any] = len(_lowerCAmelCase ) > 0
# Check if it is a base model
A_ : Dict = False
if hasattr(_lowerCAmelCase , "base_model_prefix" ):
A_ : Union[str, Any] = not hasattr(_lowerCAmelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A_ : Optional[Any] = list(model.named_children() )
A_ : str = [list_modules[-1][0]]
# add last module together with tied weights
A_ : str = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
A_ : int = list(set(_lowerCAmelCase ) ) + list(_lowerCAmelCase )
# remove ".weight" from the keys
A_ : Any = [".weight", ".bias"]
A_ : Optional[int] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A_ : Tuple = name.replace(_lowerCAmelCase , "" )
filtered_module_names.append(_lowerCAmelCase )
return filtered_module_names
def __snake_case ( _lowerCAmelCase : List[str] ) -> Any:
for m in model.modules():
if isinstance(_lowerCAmelCase , bnb.nn.Linearabit ):
return True
return False
def __snake_case ( _lowerCAmelCase : nn.Module ) -> List[Any]:
return next(parameter.parameters() ).device
def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ) -> Optional[Any]:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , 0 , dtype=_lowerCAmelCase , value=_lowerCAmelCase )
A_ : int = param_name
A_ : str = model
if "." in tensor_name:
A_ : List[Any] = tensor_name.split("." )
for split in splits[:-1]:
A_ : str = getattr(_lowerCAmelCase , _lowerCAmelCase )
if new_module is None:
raise ValueError(f"{module} has no attribute {split}." )
A_ : Union[str, Any] = new_module
A_ : int = splits[-1]
# offload weights
A_ : str = False
offload_weight(module._parameters[tensor_name] , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
if hasattr(module._parameters[tensor_name] , "SCB" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB" ) , _lowerCAmelCase , index=_lowerCAmelCase , )
else:
offload_weight(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
offload_weight(_lowerCAmelCase , param_name.replace("weight" , "SCB" ) , _lowerCAmelCase , index=_lowerCAmelCase )
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , "meta" , dtype=_lowerCAmelCase , value=torch.empty(*param.size() ) )
| 300
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCAmelCase : int = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_lowerCAmelCase : Tuple = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_lowerCAmelCase : int = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[List[List[str]]] , snake_case :List[List[str]] , snake_case :int = 1 , snake_case :int = 4 , ):
'''simple docstring'''
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=snake_case , hypotheses=snake_case , min_len=snake_case , max_len=snake_case )
}
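# --- illustrative sketch (not part of the metric above): a minimal per-sentence
# GLEU matching the description -- min(precision, recall) over shared 1..4-gram
# counts; all names below are invented for the sketch ---
from collections import Counter
def _ngram_counts_sketch(tokens, min_len=1, max_len=4):
    counts = Counter()
    for n in range(min_len, max_len + 1):
        for i in range(len(tokens) - n + 1):
            counts[tuple(tokens[i : i + n])] += 1
    return counts
def _sentence_gleu_sketch(hypothesis, reference, min_len=1, max_len=4):
    hyp = _ngram_counts_sketch(hypothesis, min_len, max_len)
    ref = _ngram_counts_sketch(reference, min_len, max_len)
    overlap = sum((hyp & ref).values())  # clipped count of matching n-grams
    precision = overlap / max(sum(hyp.values()), 1)
    recall = overlap / max(sum(ref.values()), 1)
    return min(precision, recall)
# e.g. _sentence_gleu_sketch("the cat sat".split(), "the cat sat".split()) == 1.0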
| 300
| 1
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self :Union[str, Any] , snake_case :AutoencoderKL , snake_case :CLIPTextModel , snake_case :CLIPTokenizer , snake_case :UNetaDConditionModel , snake_case :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , snake_case :StableDiffusionSafetyChecker , snake_case :CLIPImageProcessor , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , )
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A_ : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
self.enable_attention_slicing(snake_case )
@torch.no_grad()
def __call__( self :Any , snake_case :Union[str, List[str]] , snake_case :int = 512 , snake_case :int = 512 , snake_case :int = 50 , snake_case :float = 7.5 , snake_case :Optional[Union[str, List[str]]] = None , snake_case :Optional[int] = 1 , snake_case :float = 0.0 , snake_case :Optional[torch.Generator] = None , snake_case :Optional[torch.FloatTensor] = None , snake_case :Optional[str] = "pil" , snake_case :bool = True , snake_case :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case :int = 1 , snake_case :Optional[torch.FloatTensor] = None , **snake_case :Optional[Any] , ):
'''simple docstring'''
if isinstance(snake_case , snake_case ):
A_ : Dict = 1
elif isinstance(snake_case , snake_case ):
A_ : Optional[Any] = len(snake_case )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(snake_case )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case , snake_case ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(snake_case )}." )
# get prompt text embeddings
A_ : int = self.tokenizer(
snake_case , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
A_ : Dict = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A_ : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
A_ : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
A_ : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
A_ , A_ , A_ : int = text_embeddings.shape
A_ : List[str] = text_embeddings.repeat(1 , snake_case , 1 )
A_ : List[str] = text_embeddings.view(bs_embed * num_images_per_prompt , snake_case , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
A_ : Dict = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A_ : List[str]
if negative_prompt is None:
A_ : List[str] = [""]
elif type(snake_case ) is not type(snake_case ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(snake_case )} !="
f" {type(snake_case )}." )
elif isinstance(snake_case , snake_case ):
A_ : Optional[Any] = [negative_prompt]
elif batch_size != len(snake_case ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(snake_case )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
A_ : Any = negative_prompt
A_ : Optional[int] = text_input_ids.shape[-1]
A_ : Dict = self.tokenizer(
snake_case , padding="max_length" , max_length=snake_case , truncation=snake_case , return_tensors="pt" , )
A_ : Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A_ : Tuple = uncond_embeddings.shape[1]
A_ : Dict = uncond_embeddings.repeat(snake_case , snake_case , 1 )
A_ : Dict = uncond_embeddings.view(batch_size * num_images_per_prompt , snake_case , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A_ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
A_ : List[str] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
A_ : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
A_ : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A_ : Tuple = torch.randn(
snake_case , generator=snake_case , device="cpu" , dtype=snake_case ).to(self.device )
A_ : Optional[Any] = torch.randn(snake_case , generator=snake_case , device="cpu" , dtype=snake_case ).to(
self.device )
else:
A_ : int = torch.randn(
snake_case , generator=snake_case , device=self.device , dtype=snake_case )
A_ : Optional[int] = torch.randn(snake_case , generator=snake_case , device=self.device , dtype=snake_case )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
A_ : Tuple = latents_reference.to(self.device )
A_ : Any = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
A_ : List[Any] = (latents_shape[3] - latents_shape_reference[3]) // 2
A_ : Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2
A_ : Optional[int] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
A_ : Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
A_ : Optional[Any] = 0 if dx < 0 else dx
A_ : Optional[Any] = 0 if dy < 0 else dy
A_ : List[str] = max(-dx , 0 )
A_ : List[Any] = max(-dy , 0 )
A_ : Any = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(snake_case )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
A_ : str = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A_ : Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A_ : Optional[int] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A_ : List[str] = {}
if accepts_eta:
A_ : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(snake_case ) ):
# expand the latents if we are doing classifier free guidance
A_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A_ : Any = self.scheduler.scale_model_input(snake_case , snake_case )
# predict the noise residual
A_ : List[str] = self.unet(snake_case , snake_case , encoder_hidden_states=snake_case ).sample
# perform guidance
if do_classifier_free_guidance:
A_ , A_ : Dict = noise_pred.chunk(2 )
A_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A_ : Tuple = self.scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case , snake_case , snake_case )
A_ : List[str] = 1 / 0.18215 * latents
A_ : Tuple = self.vae.decode(snake_case ).sample
A_ : Dict = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
A_ : int = self.feature_extractor(self.numpy_to_pil(snake_case ) , return_tensors="pt" ).to(
self.device )
A_ , A_ : List[str] = self.safety_checker(
images=snake_case , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
A_ : List[str] = None
if output_type == "pil":
A_ : Optional[int] = self.numpy_to_pil(snake_case )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=snake_case , nsfw_content_detected=snake_case )
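# --- illustrative sketch: the classifier-free-guidance combination applied in
# the denoising loop above, shown on dummy tensors ---
_uncond_sketch = torch.zeros(1, 4, 8, 8)
_text_sketch = torch.ones(1, 4, 8, 8)
_w_sketch = 7.5  # plays the role of guidance_scale; w > 1 pushes toward the prompt
_guided_sketch = _uncond_sketch + _w_sketch * (_text_sketch - _uncond_sketch)
assert torch.allclose(_guided_sketch, torch.full((1, 4, 8, 8), _w_sketch))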
| 300
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ) -> str:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] ) -> Optional[int]:
A_ : Tuple = tmp_path / "cache"
A_ : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : Optional[Any] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ) -> str:
A_ : List[Any] = tmp_path / "cache"
A_ : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : int = features.copy() if features else default_expected_features
A_ : str = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ : Union[str, Any] = ParquetDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any ) -> Optional[Any]:
A_ : Dict = tmp_path / "cache"
A_ : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : Optional[int] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , split=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] ) -> List[str]:
if issubclass(_lowerCAmelCase , _lowerCAmelCase ):
A_ : int = parquet_path
elif issubclass(_lowerCAmelCase , _lowerCAmelCase ):
A_ : Optional[int] = [parquet_path]
A_ : Optional[int] = tmp_path / "cache"
A_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : Optional[int] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]=("train",) ) -> Tuple:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
for split in splits:
A_ : List[str] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict ) -> Optional[int]:
A_ : Optional[Any] = tmp_path / "cache"
A_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : Union[str, Any] = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __snake_case ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : str ) -> Tuple:
A_ : Optional[Any] = tmp_path / "cache"
A_ : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : List[str] = features.copy() if features else default_expected_features
A_ : Tuple = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ : Optional[int] = ParquetDatasetReader({"train": parquet_path} , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Any ) -> Union[str, Any]:
if split:
A_ : Any = {split: parquet_path}
else:
A_ : Optional[Any] = "train"
A_ : str = {"train": parquet_path, "test": parquet_path}
A_ : Any = tmp_path / "cache"
A_ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : Dict = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] ) -> Dict:
A_ : List[str] = ParquetDatasetWriter(_lowerCAmelCase , tmp_path / "foo.parquet" )
assert writer.write() > 0
A_ : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" )
A_ : Dict = pf.read()
assert dataset.data.table == output_table
def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ) -> List[Any]:
A_ : Tuple = str(shared_datadir / "test_image_rgb.jpg" )
A_ : int = {"image": [image_path]}
A_ : Optional[Any] = Features({"image": Image()} )
A_ : Union[str, Any] = Dataset.from_dict(_lowerCAmelCase , features=_lowerCAmelCase )
A_ : Tuple = ParquetDatasetWriter(_lowerCAmelCase , tmp_path / "foo.parquet" )
assert writer.write() > 0
A_ : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
A_ : int = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=_lowerCAmelCase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ) -> Any:
assert get_writer_batch_size(_lowerCAmelCase ) == expected
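# --- usage sketch (hedged): the public round-trip these tests exercise; the
# file name is illustrative ---
# from datasets import Dataset
# ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [0.1, 0.2]})
# ds.to_parquet("demo.parquet")                    # backed by ParquetDatasetWriter
# reloaded = Dataset.from_parquet("demo.parquet")  # backed by ParquetDatasetReader
# assert reloaded.column_names == ["col_1", "col_2", "col_3"]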
| 300
| 1
|
from PIL import Image
def mean_threshold(image: Image.Image) -> Image.Image:
    """Binarize a grayscale image around the mean of its pixel values."""
    width, height = image.size
    mean = 0
    pixels = image.load()
    # first pass: accumulate the mean intensity over every pixel
    for i in range(height):
        for j in range(width):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    # second pass: pixels brighter than the mean become white, the rest black
    for i in range(height):
        for j in range(width):
            pixels[j, i] = 255 if pixels[j, i] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open('''path_to_image''').convert('''L'''))
    image.save('''output_image_path''')
| 300
|
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any]="shi-labs/oneformer_demo" ) -> int:
with open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) as f:
A_ : Optional[int] = json.load(_lowerCAmelCase )
A_ : Union[str, Any] = {}
A_ : Tuple = []
A_ : Optional[Any] = []
for key, info in class_info.items():
A_ : Tuple = info["name"]
class_names.append(info["name"] )
if info["isthing"]:
thing_ids.append(int(_lowerCAmelCase ) )
A_ : Optional[Any] = thing_ids
A_ : int = class_names
return metadata
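# --- illustrative note: for a made-up class map such as
#   {"0": {"name": "wall", "isthing": 0}, "1": {"name": "person", "isthing": 1}}
# the helper above returns
#   {"0": "wall", "1": "person", "thing_ids": [1], "class_names": ["wall", "person"]}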
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self :List[Any] , snake_case :List[str] , snake_case :int=7 , snake_case :Optional[int]=3 , snake_case :Union[str, Any]=30 , snake_case :Tuple=400 , snake_case :List[Any]=None , snake_case :Optional[Any]=True , snake_case :Tuple=True , snake_case :Dict=[0.5, 0.5, 0.5] , snake_case :Any=[0.5, 0.5, 0.5] , snake_case :Optional[int]=10 , snake_case :Tuple=False , snake_case :Optional[int]=255 , snake_case :Optional[Any]="shi-labs/oneformer_demo" , snake_case :Optional[Any]="ade20k_panoptic.json" , snake_case :Optional[int]=10 , ):
'''simple docstring'''
A_ : Tuple = parent
A_ : List[str] = batch_size
A_ : Optional[int] = num_channels
A_ : Tuple = min_resolution
A_ : List[Any] = max_resolution
A_ : Union[str, Any] = do_resize
A_ : Any = {"shortest_edge": 32, "longest_edge": 1_333} if size is None else size
A_ : Tuple = do_normalize
A_ : List[str] = image_mean
A_ : List[Any] = image_std
A_ : Union[str, Any] = class_info_file
A_ : List[Any] = prepare_metadata(snake_case , snake_case )
A_ : Tuple = num_text
A_ : str = repo_path
# for the post_process_functions
A_ : Any = 2
A_ : int = 10
A_ : Optional[int] = 10
A_ : Tuple = 3
A_ : Tuple = 4
A_ : str = num_labels
A_ : int = do_reduce_labels
A_ : List[Any] = ignore_index
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Any , snake_case :Any=False ):
'''simple docstring'''
if not batched:
A_ : List[str] = image_inputs[0]
if isinstance(snake_case , Image.Image ):
A_ , A_ : Dict = image.size
else:
A_ , A_ : Tuple = image.shape[1], image.shape[2]
if w < h:
A_ : str = int(self.size["shortest_edge"] * h / w )
A_ : Any = self.size["shortest_edge"]
elif w > h:
A_ : Optional[int] = self.size["shortest_edge"]
A_ : List[str] = int(self.size["shortest_edge"] * w / h )
else:
A_ : List[str] = self.size["shortest_edge"]
A_ : Optional[Any] = self.size["shortest_edge"]
else:
A_ : Tuple = []
for image in image_inputs:
A_ , A_ : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            A_ : Tuple = max(snake_case , key=lambda item : item[0] )[0]
            A_ : Union[str, Any] = max(snake_case , key=lambda item : item[1] )[1]
return expected_height, expected_width
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class __magic_name__ ( lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
__UpperCamelCase = image_processing_class
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : Union[str, Any] = OneFormerImageProcessorTester(self )
@property
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case , "image_mean" ) )
self.assertTrue(hasattr(snake_case , "image_std" ) )
self.assertTrue(hasattr(snake_case , "do_normalize" ) )
self.assertTrue(hasattr(snake_case , "do_resize" ) )
self.assertTrue(hasattr(snake_case , "size" ) )
self.assertTrue(hasattr(snake_case , "ignore_index" ) )
self.assertTrue(hasattr(snake_case , "class_info_file" ) )
self.assertTrue(hasattr(snake_case , "num_text" ) )
self.assertTrue(hasattr(snake_case , "repo_path" ) )
self.assertTrue(hasattr(snake_case , "metadata" ) )
self.assertTrue(hasattr(snake_case , "do_reduce_labels" ) )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , Image.Image )
# Test not batched input
A_ : str = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
A_ , A_ : str = self.image_processing_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ , A_ : Optional[Any] = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case )
A_ : List[str] = image_processor(
snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case , numpify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , np.ndarray )
# Test not batched input
A_ : List[str] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
A_ , A_ : List[str] = self.image_processing_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ , A_ : int = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case )
A_ : Optional[Any] = image_processor(
snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case , torchify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , torch.Tensor )
# Test not batched input
A_ : Any = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
A_ , A_ : Tuple = self.image_processing_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ , A_ : Tuple = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case )
A_ : Any = image_processor(
snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict=False , snake_case :str=False , snake_case :Dict="np" ):
'''simple docstring'''
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
A_ : Tuple = self.image_processing_tester.num_labels
A_ : str = None
A_ : Tuple = None
A_ : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case )
if with_segmentation_maps:
A_ : List[str] = num_labels
if is_instance_map:
A_ : List[str] = list(range(snake_case ) ) * 2
A_ : int = dict(enumerate(snake_case ) )
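                # map the 2 * num_labels instance ids back onto num_labels semantic
                # classes, so that two distinct instances can share a semantic label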
A_ : List[str] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
A_ : int = [Image.fromarray(snake_case ) for annotation in annotations]
A_ : List[str] = image_processor(
snake_case , ["semantic"] * len(snake_case ) , snake_case , return_tensors="pt" , instance_id_to_semantic_id=snake_case , pad_and_return_pixel_mask=snake_case , )
return inputs
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
def common(snake_case :Dict=False , snake_case :Optional[int]=None ):
A_ : Tuple = self.comm_get_image_processor_inputs(
with_segmentation_maps=snake_case , is_instance_map=snake_case , segmentation_type=snake_case )
A_ : Optional[Any] = inputs["mask_labels"]
A_ : List[Any] = inputs["class_labels"]
A_ : Optional[Any] = inputs["pixel_values"]
A_ : int = inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(snake_case , snake_case , snake_case ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensures that padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(snake_case ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=snake_case )
common(is_instance_map=snake_case , segmentation_type="pil" )
common(is_instance_map=snake_case , segmentation_type="pil" )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Any = np.zeros((20, 50) )
A_ : List[str] = 1
A_ : int = 1
A_ : Optional[Any] = 1
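        # binary_mask_to_rle run-length encodes the flattened mask; each foreground
        # run contributes a (start, length) pair, so four values mean two runs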
A_ : Any = binary_mask_to_rle(snake_case )
self.assertEqual(len(snake_case ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Union[str, Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
A_ : Any = self.image_processing_tester.get_fake_oneformer_outputs()
        A_ : int = image_processor.post_process_semantic_segmentation(snake_case )
self.assertEqual(len(snake_case ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
A_ : Optional[int] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        A_ : List[Any] = image_processor.post_process_semantic_segmentation(snake_case , target_sizes=snake_case )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : List[str] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
A_ : str = self.image_processing_tester.get_fake_oneformer_outputs()
A_ : Optional[Any] = image_processor.post_process_instance_segmentation(snake_case , threshold=0 )
self.assertTrue(len(snake_case ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , snake_case )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Tuple = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
A_ : List[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
A_ : Optional[Any] = image_processor.post_process_panoptic_segmentation(snake_case , threshold=0 )
self.assertTrue(len(snake_case ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , snake_case )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Any ) -> Dict:
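    # NOTE: despite the name, this returns pairwise cosine *similarity*: both
    # embedding matrices are L2-normalized, so multiplying one by the transpose
    # of the other yields cosine similarities in [-1, 1]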
A_ : Optional[Any] = nn.functional.normalize(_lowerCAmelCase )
A_ : List[str] = nn.functional.normalize(_lowerCAmelCase )
return torch.mm(_lowerCAmelCase , normalized_text_embeds.t() )
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = CLIPConfig
__UpperCamelCase = ['''CLIPEncoderLayer''']
def __init__( self :int , snake_case :CLIPConfig ):
'''simple docstring'''
super().__init__(snake_case )
A_ : int = CLIPVisionModel(config.vision_config )
A_ : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=snake_case )
A_ : Tuple = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=snake_case )
A_ : str = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=snake_case )
A_ : List[str] = nn.Parameter(torch.ones(17 ) , requires_grad=snake_case )
A_ : int = nn.Parameter(torch.ones(3 ) , requires_grad=snake_case )
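        # in the reference safety checker, the 17-row and 3-row parameters hold
        # the NSFW concept embeddings and the "special care" embeddings, each
        # paired with per-concept threshold weights; the torch.ones initializers
        # are placeholders overwritten by the pretrained weights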
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Dict , snake_case :Any ):
'''simple docstring'''
A_ : List[Any] = self.vision_model(snake_case )[1] # pooled_output
A_ : List[Any] = self.visual_projection(snake_case )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : Optional[Any] = cosine_distance(snake_case , self.special_care_embeds ).cpu().float().numpy()
A_ : Tuple = cosine_distance(snake_case , self.concept_embeds ).cpu().float().numpy()
A_ : Union[str, Any] = []
A_ : Any = image_embeds.shape[0]
for i in range(snake_case ):
A_ : Optional[int] = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A_ : Optional[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
A_ : Optional[Any] = special_cos_dist[i][concept_idx]
A_ : Tuple = self.special_care_embeds_weights[concept_idx].item()
A_ : Union[str, Any] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]} )
A_ : Any = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
A_ : Tuple = cos_dist[i][concept_idx]
A_ : Tuple = self.concept_embeds_weights[concept_idx].item()
A_ : Tuple = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(snake_case )
result.append(snake_case )
A_ : Any = [len(res["bad_concepts"] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor ):
'''simple docstring'''
A_ : List[str] = self.vision_model(snake_case )[1] # pooled_output
A_ : int = self.visual_projection(snake_case )
A_ : Tuple = cosine_distance(snake_case , self.special_care_embeds )
A_ : Tuple = cosine_distance(snake_case , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A_ : Optional[Any] = 0.0
A_ : Tuple = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
A_ : Optional[Any] = torch.any(special_scores > 0 , dim=1 )
A_ : Optional[Any] = special_care * 0.01
A_ : Optional[int] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
A_ : Union[str, Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
A_ : Union[str, Any] = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = '''data2vec-vision'''
def __init__( self :int , snake_case :Optional[int]=768 , snake_case :Any=12 , snake_case :Any=12 , snake_case :Tuple=3_072 , snake_case :Any="gelu" , snake_case :Tuple=0.0 , snake_case :int=0.0 , snake_case :Any=0.02 , snake_case :str=1e-12 , snake_case :List[str]=224 , snake_case :Dict=16 , snake_case :int=3 , snake_case :int=False , snake_case :str=False , snake_case :List[Any]=False , snake_case :Optional[Any]=False , snake_case :Tuple=0.1 , snake_case :Optional[Any]=0.1 , snake_case :Any=True , snake_case :Optional[Any]=[3, 5, 7, 11] , snake_case :Dict=[1, 2, 3, 6] , snake_case :int=True , snake_case :List[Any]=0.4 , snake_case :Any=256 , snake_case :Union[str, Any]=1 , snake_case :Union[str, Any]=False , snake_case :Any=255 , **snake_case :int , ):
'''simple docstring'''
super().__init__(**snake_case )
A_ : Dict = hidden_size
A_ : Tuple = num_hidden_layers
A_ : List[str] = num_attention_heads
A_ : Any = intermediate_size
A_ : Optional[Any] = hidden_act
A_ : Any = hidden_dropout_prob
A_ : List[str] = attention_probs_dropout_prob
A_ : Optional[Any] = initializer_range
A_ : List[str] = layer_norm_eps
A_ : str = image_size
A_ : Optional[int] = patch_size
A_ : int = num_channels
A_ : Optional[Any] = use_mask_token
A_ : Optional[Any] = use_absolute_position_embeddings
A_ : Optional[int] = use_relative_position_bias
A_ : Dict = use_shared_relative_position_bias
A_ : Any = layer_scale_init_value
A_ : Optional[Any] = drop_path_rate
A_ : Dict = use_mean_pooling
# decode head attributes (semantic segmentation)
A_ : Tuple = out_indices
A_ : Optional[Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
A_ : str = use_auxiliary_head
A_ : List[Any] = auxiliary_loss_weight
A_ : List[str] = auxiliary_channels
A_ : Dict = auxiliary_num_convs
A_ : List[str] = auxiliary_concat_input
A_ : Optional[int] = semantic_loss_ignore_index
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
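    # absolute tolerance used when validating the ONNX export against the
    # original model's outputs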
@property
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
return 1e-4
def __snake_case ( _lowerCAmelCase : float , _lowerCAmelCase : float ) -> float:
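    """
    Compute the speed of sound in a fluid from its density and bulk modulus
    using the Newton-Laplace formula: speed = sqrt(bulk_modulus / density).

    Illustrative doctests (the first uses values chosen for an exact result):
    >>> __snake_case(1, 4)
    2.0
    >>> __snake_case(0, 1)
    Traceback (most recent call last):
        ...
    ValueError: Impossible fluid density
    """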
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = ['''input_features''', '''attention_mask''']
def __init__( self :int , snake_case :int=80 , snake_case :Optional[int]=16_000 , snake_case :Tuple=0.0 , snake_case :Optional[int]=10 , snake_case :Optional[Any]=25 , snake_case :Dict="hamming_window" , snake_case :Tuple=32768.0 , snake_case :str=0.97 , snake_case :List[str]=1.0 , snake_case :Dict=True , snake_case :str=True , snake_case :Optional[Any]=False , **snake_case :Union[str, Any] , ):
'''simple docstring'''
super().__init__(feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , **snake_case )
A_ : Union[str, Any] = feature_size
A_ : int = sampling_rate
A_ : str = padding_value
A_ : int = hop_length
A_ : List[str] = win_length
A_ : Any = frame_signal_scale
A_ : str = preemphasis_coeff
A_ : List[str] = mel_floor
A_ : str = normalize_means
A_ : Any = normalize_vars
A_ : Optional[Any] = win_function
A_ : Dict = return_attention_mask
A_ : List[str] = win_length * sampling_rate // 1_000
A_ : List[str] = hop_length * sampling_rate // 1_000
A_ : List[str] = optimal_fft_length(self.sample_size )
A_ : str = (self.n_fft // 2) + 1
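        # win_length and hop_length are given in milliseconds: sample_size and
        # sample_stride convert them to sample counts, n_fft is the smallest
        # efficient FFT length covering one window, and n_freqs is the number of
        # non-redundant frequency bins of a real-valued FFT of that length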
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :np.array ):
'''simple docstring'''
if self.win_function == "hamming_window":
A_ : Dict = window_function(window_length=self.sample_size , name=self.win_function , periodic=snake_case )
else:
A_ : List[str] = window_function(window_length=self.sample_size , name=self.win_function )
A_ : Optional[int] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
A_ : Tuple = spectrogram(
one_waveform * self.frame_signal_scale , window=snake_case , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=snake_case , preemphasis=self.preemphasis_coeff , mel_filters=snake_case , mel_floor=self.mel_floor , log_mel="log" , )
return msfc_features.T
def SCREAMING_SNAKE_CASE ( self :int , snake_case :Any , snake_case :Union[str, Any] , snake_case :str ):
'''simple docstring'''
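        # per-utterance mean/variance normalization: statistics are computed over
        # the first `input_length` (un-padded) frames only, and padded frames are
        # reset to the padding value afterwards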
if self.normalize_means:
A_ : int = x[:input_length].mean(axis=0 )
A_ : Any = np.subtract(snake_case , snake_case )
if self.normalize_vars:
A_ : List[Any] = x[:input_length].std(axis=0 )
A_ : Optional[int] = np.divide(snake_case , snake_case )
if input_length < x.shape[0]:
A_ : Optional[int] = padding_value
# make sure array is in float32
A_ : Union[str, Any] = x.astype(np.floataa )
return x
def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[np.ndarray] , snake_case :Optional[np.ndarray] = None ):
'''simple docstring'''
A_ : str = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(snake_case , snake_case , self.padding_value ) for x, n in zip(snake_case , snake_case )]
def __call__( self :int , snake_case :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , snake_case :Union[bool, str, PaddingStrategy] = False , snake_case :Optional[int] = None , snake_case :bool = False , snake_case :Optional[int] = None , snake_case :Optional[bool] = None , snake_case :Optional[Union[str, TensorType]] = None , snake_case :Optional[int] = None , **snake_case :Dict , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
A_ : Optional[int] = isinstance(snake_case , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
A_ : Optional[Any] = is_batched_numpy or (
isinstance(snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A_ : List[Any] = [np.asarray(snake_case , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(snake_case , np.ndarray ):
A_ : int = np.asarray(snake_case , dtype=np.floataa )
elif isinstance(snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A_ : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A_ : Tuple = [raw_speech]
# extract fbank features
A_ : int = [self._extract_mfsc_features(snake_case ) for one_waveform in raw_speech]
# convert into correct format for padding
A_ : Union[str, Any] = BatchFeature({"input_features": features} )
A_ : str = self.pad(
snake_case , padding=snake_case , max_length=snake_case , truncation=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , **snake_case , )
# make sure list is in array format
A_ : Optional[int] = padded_inputs.get("input_features" )
if isinstance(input_features[0] , snake_case ):
A_ : Union[str, Any] = [np.asarray(snake_case , dtype=np.floataa ) for feature in input_features]
A_ : Dict = padded_inputs.get("attention_mask" )
if attention_mask is not None:
A_ : Any = [np.asarray(snake_case , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
A_ : Dict = (
np.array(snake_case , dtype=np.intaa )
if self._get_padding_strategies(snake_case , max_length=snake_case ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
A_ : Optional[int] = self.normalize(
padded_inputs["input_features"] , attention_mask=snake_case )
if return_tensors is not None:
A_ : Dict = padded_inputs.convert_to_tensors(snake_case )
return padded_inputs
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = 42
@flax_register_to_config
class __magic_name__ ( nn.Module , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = 32
__UpperCamelCase = 4
__UpperCamelCase = 4
__UpperCamelCase = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__UpperCamelCase = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
__UpperCamelCase = False
__UpperCamelCase = (3_20, 6_40, 12_80, 12_80)
__UpperCamelCase = 2
__UpperCamelCase = 8
__UpperCamelCase = None
__UpperCamelCase = 12_80
__UpperCamelCase = 0.0
__UpperCamelCase = False
__UpperCamelCase = jnp.floataa
__UpperCamelCase = True
__UpperCamelCase = 0
__UpperCamelCase = False
def SCREAMING_SNAKE_CASE ( self :str , snake_case :jax.random.KeyArray ):
'''simple docstring'''
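        # initialize parameters from dummy inputs: a latent sample of shape
        # (1, in_channels, sample_size, sample_size), a single timestep, and
        # empty text-encoder hidden states of shape (1, 1, cross_attention_dim)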
A_ : int = (1, self.in_channels, self.sample_size, self.sample_size)
A_ : Union[str, Any] = jnp.zeros(snake_case , dtype=jnp.floataa )
A_ : List[Any] = jnp.ones((1,) , dtype=jnp.intaa )
A_ : Optional[int] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
A_ , A_ : List[Any] = jax.random.split(snake_case )
A_ : Union[str, Any] = {"params": params_rng, "dropout": dropout_rng}
return self.init(snake_case , snake_case , snake_case , snake_case )["params"]
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Any = self.block_out_channels
A_ : Any = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
A_ : str = self.num_attention_heads or self.attention_head_dim
# input
A_ : Optional[Any] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
A_ : int = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
A_ : int = FlaxTimestepEmbedding(snake_case , dtype=self.dtype )
A_ : int = self.only_cross_attention
if isinstance(snake_case , snake_case ):
A_ : str = (only_cross_attention,) * len(self.down_block_types )
if isinstance(snake_case , snake_case ):
A_ : Union[str, Any] = (num_attention_heads,) * len(self.down_block_types )
# down
A_ : Optional[int] = []
A_ : Any = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
A_ : str = output_channel
A_ : Optional[int] = block_out_channels[i]
A_ : str = i == len(snake_case ) - 1
if down_block_type == "CrossAttnDownBlock2D":
A_ : Optional[int] = FlaxCrossAttnDownBlockaD(
in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
A_ : List[str] = FlaxDownBlockaD(
in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(snake_case )
A_ : Dict = down_blocks
# mid
A_ : List[Any] = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
A_ : Union[str, Any] = []
A_ : Union[str, Any] = list(reversed(snake_case ) )
A_ : Any = list(reversed(snake_case ) )
A_ : Any = list(reversed(snake_case ) )
A_ : List[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
A_ : Tuple = output_channel
A_ : Tuple = reversed_block_out_channels[i]
A_ : List[str] = reversed_block_out_channels[min(i + 1 , len(snake_case ) - 1 )]
A_ : int = i == len(snake_case ) - 1
if up_block_type == "CrossAttnUpBlock2D":
A_ : str = FlaxCrossAttnUpBlockaD(
in_channels=snake_case , out_channels=snake_case , prev_output_channel=snake_case , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
A_ : Optional[Any] = FlaxUpBlockaD(
in_channels=snake_case , out_channels=snake_case , prev_output_channel=snake_case , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(snake_case )
A_ : Union[str, Any] = output_channel
A_ : Optional[int] = up_blocks
# out
A_ : List[Any] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
A_ : Tuple = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self :Optional[Any] , snake_case :List[str] , snake_case :str , snake_case :Optional[int] , snake_case :Any=None , snake_case :List[str]=None , snake_case :bool = True , snake_case :bool = False , ):
'''simple docstring'''
if not isinstance(snake_case , jnp.ndarray ):
A_ : Any = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(snake_case , jnp.ndarray ) and len(timesteps.shape ) == 0:
A_ : Optional[Any] = timesteps.astype(dtype=jnp.floataa )
A_ : List[str] = jnp.expand_dims(snake_case , 0 )
A_ : Union[str, Any] = self.time_proj(snake_case )
A_ : str = self.time_embedding(snake_case )
# 2. pre-process
A_ : Any = jnp.transpose(snake_case , (0, 2, 3, 1) )
A_ : Dict = self.conv_in(snake_case )
# 3. down
A_ : Optional[Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(snake_case , snake_case ):
A_ , A_ : Optional[Any] = down_block(snake_case , snake_case , snake_case , deterministic=not train )
else:
A_ , A_ : str = down_block(snake_case , snake_case , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
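            # ControlNet-style conditioning: add the externally computed residuals
            # to the skip connections gathered from the down blocks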
A_ : Optional[int] = ()
for down_block_res_sample, down_block_additional_residual in zip(
snake_case , snake_case ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
A_ : List[Any] = new_down_block_res_samples
# 4. mid
A_ : Union[str, Any] = self.mid_block(snake_case , snake_case , snake_case , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
A_ : Optional[int] = down_block_res_samples[-(self.layers_per_block + 1) :]
A_ : str = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(snake_case , snake_case ):
A_ : Optional[int] = up_block(
snake_case , temb=snake_case , encoder_hidden_states=snake_case , res_hidden_states_tuple=snake_case , deterministic=not train , )
else:
A_ : Optional[Any] = up_block(snake_case , temb=snake_case , res_hidden_states_tuple=snake_case , deterministic=not train )
# 6. post-process
A_ : str = self.conv_norm_out(snake_case )
A_ : Tuple = nn.silu(snake_case )
A_ : Optional[int] = self.conv_out(snake_case )
A_ : int = jnp.transpose(snake_case , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=snake_case )
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self :List[Any] , snake_case :int , snake_case :int , snake_case :Optional[int] = None , snake_case :int = 50_257 , snake_case :int = 1_024 , snake_case :int = 768 , snake_case :int = 12 , snake_case :int = 12 , snake_case :Optional[int] = None , snake_case :str = "gelu_new" , snake_case :float = 0.1 , snake_case :float = 0.1 , snake_case :float = 0.1 , snake_case :float = 1e-5 , snake_case :float = 0.02 , snake_case :bool = True , snake_case :bool = True , snake_case :bool = False , snake_case :bool = False , ):
'''simple docstring'''
super().__init__()
A_ : Tuple = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
f" `n_embd`: {n_embd} are not equal." )
A_ : List[Any] = prefix_inner_dim
A_ : Union[str, Any] = prefix_hidden_dim
A_ : List[str] = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
A_ : List[Any] = (
nn.Linear(self.prefix_hidden_dim , snake_case ) if self.prefix_hidden_dim is not None else nn.Identity()
)
A_ : List[Any] = GPTaConfig(
vocab_size=snake_case , n_positions=snake_case , n_embd=snake_case , n_layer=snake_case , n_head=snake_case , n_inner=snake_case , activation_function=snake_case , resid_pdrop=snake_case , embd_pdrop=snake_case , attn_pdrop=snake_case , layer_norm_epsilon=snake_case , initializer_range=snake_case , scale_attn_weights=snake_case , use_cache=snake_case , scale_attn_by_inverse_layer_idx=snake_case , reorder_and_upcast_attn=snake_case , )
A_ : Optional[Any] = GPTaLMHeadModel(snake_case )
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.Tensor , snake_case :torch.Tensor , snake_case :Optional[torch.Tensor] = None , snake_case :Optional[torch.Tensor] = None , ):
'''simple docstring'''
A_ : Any = self.transformer.transformer.wte(snake_case )
A_ : str = self.encode_prefix(snake_case )
A_ : Union[str, Any] = self.decode_prefix(snake_case )
A_ : int = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
A_ : Dict = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
A_ : int = torch.cat((dummy_token, input_ids) , dim=1 )
A_ : Union[str, Any] = self.transformer(inputs_embeds=snake_case , labels=snake_case , attention_mask=snake_case )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def SCREAMING_SNAKE_CASE ( self :str , snake_case :int , snake_case :torch.device ):
'''simple docstring'''
return torch.zeros(snake_case , self.prefix_length , dtype=torch.intaa , device=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :int ):
'''simple docstring'''
return self.encode_prefix(snake_case )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Dict , snake_case :Optional[int] , snake_case :Any ):
'''simple docstring'''
A_ : Any = torch.split(snake_case , 1 , dim=0 )
A_ : Optional[int] = []
A_ : Union[str, Any] = []
for feature in features:
A_ : Tuple = self.decode_prefix(feature.to(snake_case ) ) # back to the clip feature
# Only support beam search for now
A_ , A_ : Dict = self.generate_beam(
input_embeds=snake_case , device=snake_case , eos_token_id=snake_case )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
A_ : int = torch.stack(snake_case )
A_ : int = torch.stack(snake_case )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :int=None , snake_case :str=None , snake_case :int=None , snake_case :int = 5 , snake_case :int = 67 , snake_case :float = 1.0 , snake_case :Optional[int] = None , ):
'''simple docstring'''
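        # beam search over GPT-2 continuations of the projected prefix: `scores`
        # accumulates per-beam log-probabilities, `seq_lengths` tracks effective
        # lengths for length normalization, and `is_stopped` marks beams that
        # have already emitted the EOS token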
A_ : Optional[Any] = eos_token_id
A_ : List[Any] = None
A_ : List[Any] = None
A_ : str = torch.ones(snake_case , device=snake_case , dtype=torch.int )
A_ : Any = torch.zeros(snake_case , device=snake_case , dtype=torch.bool )
if input_embeds is not None:
A_ : Any = input_embeds
else:
A_ : Optional[Any] = self.transformer.transformer.wte(snake_case )
for i in range(snake_case ):
A_ : Optional[Any] = self.transformer(inputs_embeds=snake_case )
A_ : str = outputs.logits
A_ : int = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
A_ : List[str] = logits.softmax(-1 ).log()
if scores is None:
A_ , A_ : Union[str, Any] = logits.topk(snake_case , -1 )
A_ : Tuple = generated.expand(snake_case , *generated.shape[1:] )
A_ , A_ : str = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
A_ : Union[str, Any] = next_tokens
else:
A_ : List[str] = tokens.expand(snake_case , *tokens.shape[1:] )
A_ : Union[str, Any] = torch.cat((tokens, next_tokens) , dim=1 )
else:
A_ : List[str] = -float(np.inf )
A_ : List[Any] = 0
A_ : Union[str, Any] = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
A_ : Optional[Any] = scores_sum / seq_lengths[:, None]
A_ , A_ : List[str] = scores_sum_average.view(-1 ).topk(snake_case , -1 )
A_ : str = next_tokens // scores_sum.shape[1]
A_ : Union[str, Any] = seq_lengths[next_tokens_source]
A_ : Optional[int] = next_tokens % scores_sum.shape[1]
A_ : Tuple = next_tokens.unsqueeze(1 )
A_ : Tuple = tokens[next_tokens_source]
A_ : Dict = torch.cat((tokens, next_tokens) , dim=1 )
A_ : Dict = generated[next_tokens_source]
A_ : Union[str, Any] = scores_sum_average * seq_lengths
A_ : Optional[int] = is_stopped[next_tokens_source]
A_ : Tuple = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
A_ : Union[str, Any] = torch.cat((generated, next_token_embed) , dim=1 )
A_ : Any = is_stopped + next_tokens.eq(snake_case ).squeeze()
if is_stopped.all():
break
A_ : int = scores / seq_lengths
A_ : str = scores.argsort(descending=snake_case )
# tokens tensors are already padded to max_seq_length
A_ : Dict = [tokens[i] for i in order]
A_ : int = torch.stack(snake_case , dim=0 )
A_ : List[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
_lowerCAmelCase : int = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {'''vocab_file''': '''spiece.model'''}
_lowerCAmelCase : str = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
_lowerCAmelCase : int = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
_lowerCAmelCase : List[str] = '''▁'''
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self :Dict , snake_case :List[str] , snake_case :Optional[Any]="</s>" , snake_case :str="<unk>" , snake_case :Union[str, Any]="<pad>" , snake_case :int=100 , snake_case :List[str]=None , snake_case :Optional[Dict[str, Any]] = None , snake_case :Union[str, Any]=True , **snake_case :Optional[int] , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
A_ : Optional[Any] = [f"<extra_id_{i}>" for i in range(snake_case )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
A_ : int = len(set(filter(lambda snake_case : bool("extra_id" in str(snake_case ) ) , snake_case ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
if legacy:
logger.warning_once(
f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
A_ : Union[str, Any] = legacy
A_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case , unk_token=snake_case , pad_token=snake_case , extra_ids=snake_case , additional_special_tokens=snake_case , sp_model_kwargs=self.sp_model_kwargs , legacy=snake_case , **snake_case , )
A_ : List[str] = vocab_file
A_ : List[str] = extra_ids
A_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case )
@staticmethod
def SCREAMING_SNAKE_CASE ( snake_case :str , snake_case :Optional[int] , snake_case :List[str] ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
A_ : Tuple = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
f" {pretrained_model_name_or_path} automatically truncating your input to"
f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , snake_case , )
return max_model_length
@property
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
return self.sp_model.get_piece_size() + self._extra_ids
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Optional[int] = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :List[int] , snake_case :Optional[List[int]] = None , snake_case :bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(snake_case )) + [1]
return ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1]
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
return list(
set(filter(lambda snake_case : bool(re.search(R"<extra_id_\d+>" , snake_case ) ) is not None , self.additional_special_tokens ) ) )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
return [self._convert_token_to_id(snake_case ) for token in self.get_sentinel_tokens()]
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :List[int] ):
'''simple docstring'''
if len(snake_case ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :List[int] , snake_case :Optional[List[int]] = None ):
'''simple docstring'''
A_ : Optional[Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[int] , snake_case :Optional[List[int]] = None ):
'''simple docstring'''
A_ : Optional[int] = self._add_eos_if_not_present(snake_case )
if token_ids_a is None:
return token_ids_a
else:
A_ : List[str] = self._add_eos_if_not_present(snake_case )
return token_ids_a + token_ids_a
def __getstate__( self :int ):
'''simple docstring'''
A_ : Dict = self.__dict__.copy()
A_ : str = None
return state
def __setstate__( self :Union[str, Any] , snake_case :Optional[Any] ):
'''simple docstring'''
A_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
A_ : Any = {}
A_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :"TextInput" , **snake_case :Tuple ):
'''simple docstring'''
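        # non-legacy path: prepend the SentencePiece underline and replace existing
        # underlines with spaces so that text following special tokens is encoded
        # consistently (see https://github.com/huggingface/transformers/pull/24565)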
if not self.legacy:
A_ : Optional[int] = SPIECE_UNDERLINE + text.replace(snake_case , " " )
return super().tokenize(snake_case , **snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :int , **snake_case :Any ):
'''simple docstring'''
if not self.legacy:
A_ : Tuple = text.startswith(snake_case )
if is_first:
A_ : Optional[Any] = text[1:]
A_ : Tuple = self.sp_model.encode(snake_case , out_type=snake_case )
if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(snake_case ):
A_ : Optional[Any] = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def SCREAMING_SNAKE_CASE ( self :str , snake_case :Any ):
'''simple docstring'''
if token.startswith("<extra_id_" ):
A_ : List[str] = re.match(R"<extra_id_(\d+)>" , snake_case )
A_ : Optional[Any] = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :List[Any] ):
'''simple docstring'''
if index < self.sp_model.get_piece_size():
A_ : Tuple = self.sp_model.IdToPiece(snake_case )
else:
A_ : Optional[Any] = f"<extra_id_{self.vocab_size - 1 - index}>"
return token
def SCREAMING_SNAKE_CASE ( self :str , snake_case :int ):
'''simple docstring'''
A_ : Tuple = []
A_ : str = ""
A_ : Optional[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case ) + token
A_ : Union[str, Any] = True
A_ : Tuple = []
else:
current_sub_tokens.append(snake_case )
A_ : int = False
out_string += self.sp_model.decode(snake_case )
return out_string.strip()
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :str , snake_case :Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(snake_case ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
A_ : Optional[Any] = os.path.join(
snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , "wb" ) as fi:
A_ : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (out_vocab_file,)
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self :Union[str, Any] , *snake_case :Tuple , **snake_case :Any ):
'''simple docstring'''
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , snake_case , )
super().__init__(*snake_case , **snake_case )
from __future__ import annotations
def __snake_case ( _lowerCAmelCase : list[float] ) -> bool:
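    """
    Return True if the given side lengths can close into a polygon, i.e. the
    longest side is strictly shorter than the sum of the remaining sides. The
    list is copied before sorting, so the input is not mutated.

    Illustrative doctests:
    >>> __snake_case([6, 10, 5])
    True
    >>> __snake_case([3, 7, 13, 2])
    False
    """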
if len(_lowerCAmelCase ) < 2:
raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
if any(i <= 0 for i in nums ):
raise ValueError("All values must be greater than 0" )
A_ : List[str] = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
def __snake_case ( _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : int=None ) -> Tuple:
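    # dataclass fields cannot take mutable defaults directly, so list defaults
    # are wrapped in a default_factory closure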
return field(default_factory=lambda: default , metadata=_lowerCAmelCase )
@dataclass
class __magic_name__ :
"""simple docstring"""
__UpperCamelCase = list_field(
default=[] , metadata={
'''help''': (
'''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'''
''' of all available models'''
)
} , )
__UpperCamelCase = list_field(
default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} )
__UpperCamelCase = list_field(
default=[8, 32, 1_28, 5_12] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , )
__UpperCamelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , )
__UpperCamelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , )
__UpperCamelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} )
__UpperCamelCase = field(default=lowerCamelCase__ , metadata={'''help''': '''Use FP16 to accelerate inference.'''} )
__UpperCamelCase = field(default=lowerCamelCase__ , metadata={'''help''': '''Benchmark training of model'''} )
__UpperCamelCase = field(default=lowerCamelCase__ , metadata={'''help''': '''Verbose memory tracing'''} )
__UpperCamelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , )
__UpperCamelCase = field(
default=lowerCamelCase__ , metadata={
'''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'''
} , )
__UpperCamelCase = field(default=lowerCamelCase__ , metadata={'''help''': '''Trace memory line by line'''} )
__UpperCamelCase = field(default=lowerCamelCase__ , metadata={'''help''': '''Save result to a CSV file'''} )
__UpperCamelCase = field(default=lowerCamelCase__ , metadata={'''help''': '''Save all print statements in a log file'''} )
__UpperCamelCase = field(default=lowerCamelCase__ , metadata={'''help''': '''Whether to print environment information'''} )
__UpperCamelCase = field(
default=lowerCamelCase__ , metadata={
'''help''': (
'''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'''
''' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'''
''' for debugging / testing and on TPU.'''
)
} , )
__UpperCamelCase = field(
default=f"""inference_time_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , )
__UpperCamelCase = field(
default=f"""inference_memory_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , )
__UpperCamelCase = field(
default=f"""train_time_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , )
__UpperCamelCase = field(
default=f"""train_memory_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , )
__UpperCamelCase = field(
default=f"""env_info_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving environment information.'''} , )
__UpperCamelCase = field(
default=f"""log_{round(time() )}.csv""" , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , )
__UpperCamelCase = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} )
__UpperCamelCase = field(
default=lowerCamelCase__ , metadata={
'''help''': (
'''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'''
''' model weights.'''
)
} , )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
warnings.warn(
f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
" are deprecated in general and it is advised to use external Benchmarking libraries "
" to benchmark Transformer models." , snake_case , )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
if len(self.models ) <= 0:
raise ValueError(
"Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
" bert-base-cased` or `args.models = ['bert-base-cased']." )
return self.models
@property
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("Multiprocessing is currently not possible on TPU." )
return False
else:
return True
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation where images generated with the same
    seed stay visually similar across output resolutions (class name
    reconstructed from the diffusers community seed-resize pipeline).
    """
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # setting the slice size to `None` restores full attention computation
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
'''simple docstring'''
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case , snake_case ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(snake_case )}." )
# get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx  # (reconstructed name) target x-offset in the full-size latents
        ty = 0 if dy < 0 else dy  # (reconstructed name) target y-offset in the full-size latents
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        # import pdb
        # pdb.set_trace()
        # paste the reference noise into the center of the full-size noise tensor
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        # scale and decode the image latents with the VAE
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
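# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original file. Community
# pipelines like the one above are normally loaded through diffusers'
# `custom_pipeline` mechanism; the checkpoint id, pipeline id and prompt below
# are placeholders.
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", custom_pipeline="seed_resize_stable_diffusion"
#   ).to("cuda")
#   image = pipe("a photo of an astronaut riding a horse", height=512, width=768).images[0]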
| 300
| 1
|
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "onecall", params=locals()).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
| 300
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]
    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
@torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx: result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
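# ---------------------------------------------------------------------------
# Minimal sketch, not part of the original file: `cosine_distance` is just a
# normalized dot product, so concept scores are cosine similarities. The
# shapes below are made up for illustration.
if __name__ == "__main__":
    demo_image_embeds = torch.randn(2, 768)  # batch of projected image embeddings
    demo_concept_embeds = torch.randn(17, 768)  # fixed concept embeddings
    scores = cosine_distance(demo_image_embeds, demo_concept_embeds)
    print(scores.shape)  # torch.Size([2, 17]), one similarity per image/concept pair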
| 300
| 1
|
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    results = run(n)
    return results[0] if len(results) else None
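# Known reference values (Project Euler 47): solution(2) == 14 (14 and 15 each
# have two distinct prime factors) and solution(3) == 644 (644, 645, 646 each
# have three distinct prime factors).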
if __name__ == "__main__":
print(solution())
| 300
|
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)
# Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_lowerCAmelCase : List[str] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
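# Example invocation of this conversion script (not part of the original file;
# the output folder name is a placeholder):
#
#   python <this_script>.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten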
| 300
| 1
|
def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot be reused
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
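# Reference value: jaro_winkler("martha", "marhta") ≈ 0.9611
# (Jaro similarity 17/18 boosted by a common prefix of length 3).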
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world'''))
| 300
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """
    Output class for the variance-exploding SDE scheduler's step function.
    """

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """
    The variance exploding stochastic differential equation (SDE) scheduler.
    """

    order = 1
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2_000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # the SDE-VE scheduler does not rescale model inputs
        return sample
    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)
    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )
    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ):
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
def __len__( self :Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
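# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original file. The diffusers
# ScoreSdeVeScheduler is driven like this (the step count is arbitrary):
#
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps=10)
#   scheduler.set_sigmas(num_inference_steps=10)
#   # each denoising iteration then calls scheduler.step_correct(...) for the
#   # corrector step and scheduler.step_pred(...) for the predictor step.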
| 300
| 1
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
return task_template
@property
    def column_mapping(self) -> Dict[str, str]:
'''simple docstring'''
return {
self.audio_column: "audio",
self.label_column: "labels",
}
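# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original file: task templates
# like this one back `datasets`' (now deprecated) `prepare_for_task` API; the
# dataset name below is a placeholder.
#
#   from datasets import load_dataset
#   ds = load_dataset("superb", "ks", split="train")
#   ds = ds.prepare_for_task("audio-classification")  # renames columns to audio/labels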
| 300
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
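# The update rule implemented above is the classic Newton-Raphson iteration
#   x_{n+1} = x_n - f(x_n) / f'(x_n)
# where sympy's diff() supplies f' and Decimal keeps the intermediate
# arithmetic at high precision.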
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
# Find Square Root of 5
print(F'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
| 300
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
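# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original file: an OnnxConfig like the
# one above is consumed by the legacy `transformers.onnx` export CLI, e.g.
#
#   python -m transformers.onnx --model=kssteven/ibert-roberta-base onnx/
#
# which uses the `inputs` mapping to declare dynamic batch/sequence axes.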
| 300
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_lowerCAmelCase : List[Any] = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_lowerCAmelCase : Union[str, Any] = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_lowerCAmelCase : Optional[Any] = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 300
| 1
|
from ....utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
class MMBTConfig:
    """
    Configuration wrapper that adds multimodal fields to an existing text
    config (name reconstructed from transformers' deprecated MMBT utilities).
    """
    def __init__(self, config, num_labels=None, modal_hidden_size=2_048):
        # start from the wrapped text config and graft multimodal fields onto it
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
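# ---------------------------------------------------------------------------
# Minimal usage sketch, not part of the original file; hypothetical example:
#
#   from transformers import BertConfig
#   text_config = BertConfig.from_pretrained("bert-base-uncased")
#   mmbt_config = MMBTConfig(text_config, num_labels=2, modal_hidden_size=2_048)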
| 300
|
def euclidean_distance_sqr(point1, point2) -> float:
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    # brute force over all pairs; used for small inputs and within the strip
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    # only up to 6 neighbors need to be checked for each point in the strip
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts) -> float:
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts
        )
    ) ** 0.5
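# For the demo points below the closest pair is (2, 3) and (3, 4), so the
# printed distance is sqrt(2) ≈ 1.4142. The divide-and-conquer recursion runs
# in O(n log n) versus O(n^2) for the plain brute-force pairwise check.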
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('''Distance:''', closest_pair_of_points(points, len(points)))
| 300
| 1
|
def __snake_case ( _lowerCAmelCase : int ) -> "list[int]":
if upper_limit < 0:
raise ValueError("Limit for the Catalan sequence must be ≥ 0" )
A_ : List[Any] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
A_ : List[str] = 1
if upper_limit > 0:
A_ : Union[str, Any] = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(_lowerCAmelCase ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
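# The first few Catalan numbers produced by the recurrence above are
# 1, 1, 2, 5, 14, 42, 132, 429, ...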
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
        N = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
| 300
|
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 300
| 1
|
import random
def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))

    print(arr)
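# Example (not part of the original file): sorting happens in place, e.g.
#   data = [3, 1, 4, 1, 5]
#   quick_sort_random(data, 0, len(data))
#   # data is now [1, 1, 3, 4, 5]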
if __name__ == "__main__":
main()
| 300
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = (DDPMScheduler,)
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , **snake_case :str ):
'''simple docstring'''
config = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**snake_case )
return config
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=snake_case , beta_end=snake_case )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=snake_case )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
self.check_over_configs(thresholding=snake_case )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=snake_case , prediction_type=snake_case , sample_max_value=snake_case , )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Tuple = self.scheduler_classes[0]
A_ : List[str] = self.get_scheduler_config()
A_ : List[str] = scheduler_class(**snake_case )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : int = self.scheduler_classes[0]
A_ : List[str] = self.get_scheduler_config()
A_ : int = scheduler_class(**snake_case )
A_ : Tuple = len(snake_case )
A_ : List[str] = self.dummy_model()
A_ : Optional[Any] = self.dummy_sample_deter
A_ : List[str] = torch.manual_seed(0 )
for t in reversed(range(snake_case ) ):
# 1. predict noise residual
A_ : Tuple = model(snake_case , snake_case )
# 2. predict previous mean of sample x_t-1
A_ : Dict = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A_ : Optional[int] = pred_prev_sample
A_ : Tuple = torch.sum(torch.abs(snake_case ) )
A_ : str = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : Optional[int] = self.scheduler_classes[0]
A_ : int = self.get_scheduler_config(prediction_type="v_prediction" )
A_ : List[str] = scheduler_class(**snake_case )
A_ : int = len(snake_case )
A_ : Dict = self.dummy_model()
A_ : str = self.dummy_sample_deter
A_ : Any = torch.manual_seed(0 )
for t in reversed(range(snake_case ) ):
# 1. predict noise residual
A_ : Optional[int] = model(snake_case , snake_case )
# 2. predict previous mean of sample x_t-1
A_ : Tuple = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A_ : List[str] = pred_prev_sample
A_ : Optional[Any] = torch.sum(torch.abs(snake_case ) )
A_ : List[str] = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : str = self.scheduler_classes[0]
A_ : Optional[Any] = self.get_scheduler_config()
A_ : Dict = scheduler_class(**snake_case )
A_ : Optional[int] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=snake_case )
A_ : Optional[int] = scheduler.timesteps
for i, timestep in enumerate(snake_case ):
if i == len(snake_case ) - 1:
A_ : str = -1
else:
A_ : List[str] = timesteps[i + 1]
A_ : Optional[int] = scheduler.previous_timestep(snake_case )
A_ : List[str] = prev_t.item()
self.assertEqual(snake_case , snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : int = self.get_scheduler_config()
A_ : Tuple = scheduler_class(**snake_case )
A_ : List[str] = [100, 87, 50, 51, 0]
with self.assertRaises(snake_case , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Any = self.scheduler_classes[0]
A_ : Union[str, Any] = self.get_scheduler_config()
A_ : Optional[int] = scheduler_class(**snake_case )
A_ : Union[str, Any] = [100, 87, 50, 1, 0]
A_ : Optional[int] = len(snake_case )
with self.assertRaises(snake_case , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=snake_case , timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Union[str, Any] = self.scheduler_classes[0]
A_ : Optional[Any] = self.get_scheduler_config()
A_ : Optional[int] = scheduler_class(**snake_case )
A_ : Optional[int] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
snake_case , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=snake_case )
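# The custom-timesteps behaviour exercised above, outside the test harness
# (a sketch assuming the public diffusers DDPMScheduler API):
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(
    num_train_timesteps=1_000,
    beta_start=0.0001,
    beta_end=0.02,
    beta_schedule="linear",
    variance_type="fixed_small",
    clip_sample=True,
)
scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # must be strictly descending
print(scheduler.timesteps)  # tensor([100,  87,  50,   1,   0])
# Passing both selectors raises, as the last test checks:
# scheduler.set_timesteps(num_inference_steps=5, timesteps=[100, 87, 50, 1, 0])  # ValueError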
| 300
| 1
|
def gray_code(bit_count: int) -> list:
    # bit_count represents the number of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be non-negative")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
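# Example: the 3-bit reflected Gray code.
print(gray_code_sequence_string(3))  # ['000', '001', '011', '010', '110', '111', '101', '100']
print(gray_code(3))                  # [0, 1, 3, 2, 6, 7, 5, 4]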
| 300
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[int] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowerCAmelCase : int = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int ) -> List[Any]:
for attribute in key.split("." ):
A_ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
A_ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
A_ : Tuple = hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
A_ : Optional[int] = value
elif weight_type == "weight_g":
A_ : Optional[int] = value
elif weight_type == "weight_v":
A_ : Any = value
elif weight_type == "bias":
A_ : str = value
else:
A_ : Any = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict ) -> List[str]:
A_ : Optional[Any] = []
A_ : Any = fairseq_model.state_dict()
A_ : Union[str, Any] = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
A_ : str = None
for name, value in fairseq_dict.items():
A_ : Tuple = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , )
A_ : Optional[Any] = True
elif name.split("." )[0] == "proj":
A_ : Dict = fairseq_model.proj
A_ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
A_ : int = True
if "*" in mapped_key:
A_ : Optional[Any] = name.split(_lowerCAmelCase )[0].split("." )[-2]
A_ : int = mapped_key.replace("*" , _lowerCAmelCase )
if "weight_g" in name:
A_ : List[Any] = "weight_g"
elif "weight_v" in name:
A_ : List[Any] = "weight_v"
elif "bias" in name:
A_ : Dict = "bias"
elif "weight" in name:
A_ : List[Any] = "weight"
else:
A_ : Dict = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"Unused weights: {unused_weights}" )
return proj_weight
def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ) -> str:
A_ : Any = full_name.split("conv_layers." )[-1]
A_ : Optional[int] = name.split("." )
A_ : Optional[Any] = int(items[0] )
A_ : Union[str, Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
A_ : List[Any] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
A_ : int = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
)
A_ : List[Any] = value
logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
A_ : Tuple = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_lowerCAmelCase )
def __snake_case ( _lowerCAmelCase : Optional[int] ) -> str:
A_ , A_ : List[str] = emb.weight.shape
A_ : Optional[int] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase )
A_ : List[Any] = emb.weight.data
return lin_layer
def __snake_case ( _lowerCAmelCase : str ) -> Tuple:
with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f:
A_ : int = f.readlines()
A_ : Dict = [line.split(" " )[0] for line in lines]
A_ : Tuple = len(_lowerCAmelCase )
A_ : Union[str, Any] = {
"<s>": 0,
"<pad>": 1,
"</s>": 2,
"<unk>": 3,
}
vocab_dict.update(dict(zip(_lowerCAmelCase , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , ) -> Tuple:
A_ : Optional[int] = WavaVecaConfig.from_pretrained(_lowerCAmelCase )
A_ : str = SpeechaTextaConfig.from_pretrained(
_lowerCAmelCase , vocab_size=_lowerCAmelCase , decoder_layers=_lowerCAmelCase , do_stable_layer_norm=_lowerCAmelCase )
A_ : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
A_ , A_ , A_ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
A_ : Union[str, Any] = model[0].eval()
# set weights for wav2vec2 encoder
A_ : Tuple = WavaVecaModel(_lowerCAmelCase )
A_ : str = recursively_load_weights_wavaveca(model.encoder , _lowerCAmelCase )
A_ : Tuple = SpeechaTextaForCausalLM(_lowerCAmelCase )
A_ , A_ : List[str] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowerCAmelCase )
# set output linear layer
unexpected_keys.remove("embed_out" )
A_ : Union[str, Any] = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
A_ : str = SpeechEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
A_ : Optional[Any] = False
# add projection layer
A_ : Optional[Any] = nn.Parameter(projection_layer.weight )
A_ : int = nn.Parameter(projection_layer.bias )
A_ : str = create_vocab_dict(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "vocab.json" ) , "w" ) as fp:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
A_ : Any = SpeechaTextaTokenizer(os.path.join(_lowerCAmelCase , "vocab.json" ) )
tokenizer.save_pretrained(_lowerCAmelCase )
A_ : Optional[int] = hf_wavavec.config.to_dict()
A_ : int = tokenizer.pad_token_id
A_ : List[str] = tokenizer.bos_token_id
A_ : List[str] = tokenizer.eos_token_id
A_ : List[str] = "speech_to_text_2"
A_ : Tuple = "wav2vec2"
A_ : str = SpeechEncoderDecoderConfig.from_dict(_lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
feature_extractor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=10_224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
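# The "*" placeholders in MAPPING are resolved per layer at load time. In
# isolation the renaming logic looks like this (a minimal sketch with a
# single, hypothetical mapping entry):
SKETCH_MAPPING = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}


def sketch_rename(name: str) -> str:
    for key, mapped_key in SKETCH_MAPPING.items():
        if key in name:
            layer_index = name.split(key)[0].split(".")[-2]  # e.g. "3"
            return mapped_key.replace("*", layer_index)
    return name


print(sketch_rename("w2v_model.encoder.layers.3.self_attn.k_proj"))
# -> encoder.layers.3.attention.k_proj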
| 300
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCAmelCase : Dict = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
_lowerCAmelCase : List[str] = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
_lowerCAmelCase : Optional[Any] = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = RealmTokenizer
def __init__( self :int , snake_case :Dict=None , snake_case :Optional[int]=None , snake_case :List[Any]=True , snake_case :str="[UNK]" , snake_case :int="[SEP]" , snake_case :Dict="[PAD]" , snake_case :Optional[Any]="[CLS]" , snake_case :int="[MASK]" , snake_case :Optional[int]=True , snake_case :str=None , **snake_case :Union[str, Any] , ):
'''simple docstring'''
super().__init__(
snake_case , tokenizer_file=snake_case , do_lower_case=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , tokenize_chinese_chars=snake_case , strip_accents=snake_case , **snake_case , )
A_ : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , snake_case ) != do_lower_case
or normalizer_state.get("strip_accents" , snake_case ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , snake_case ) != tokenize_chinese_chars
):
A_ : int = getattr(snake_case , normalizer_state.pop("type" ) )
A_ : str = do_lower_case
A_ : Union[str, Any] = strip_accents
A_ : Optional[Any] = tokenize_chinese_chars
A_ : Dict = normalizer_class(**snake_case )
A_ : Any = do_lower_case
def SCREAMING_SNAKE_CASE ( self :str , snake_case :Dict , **snake_case :Optional[Any] ):
'''simple docstring'''
A_ : int = PaddingStrategy.MAX_LENGTH
A_ : str = text
A_ : Optional[Any] = kwargs.pop("text_pair" , snake_case )
A_ : Dict = kwargs.pop("return_tensors" , snake_case )
A_ : Dict = {
"input_ids": [],
"attention_mask": [],
"token_type_ids": [],
}
for idx, candidate_text in enumerate(snake_case ):
if batch_text_pair is not None:
A_ : Union[str, Any] = batch_text_pair[idx]
else:
A_ : Optional[int] = None
A_ : List[Any] = super().__call__(snake_case , snake_case , return_tensors=snake_case , **snake_case )
A_ : Dict = encoded_candidates.get("input_ids" )
A_ : List[Any] = encoded_candidates.get("attention_mask" )
A_ : List[Any] = encoded_candidates.get("token_type_ids" )
if encoded_input_ids is not None:
output_data["input_ids"].append(snake_case )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(snake_case )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(snake_case )
A_ : List[str] = {key: item for key, item in output_data.items() if len(snake_case ) != 0}
return BatchEncoding(snake_case , tensor_type=snake_case )
def SCREAMING_SNAKE_CASE ( self :int , snake_case :Union[str, Any] , snake_case :List[Any]=None ):
'''simple docstring'''
A_ : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :List[int] , snake_case :Optional[List[int]] = None ):
'''simple docstring'''
A_ : Tuple = [self.sep_token_id]
A_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :str , snake_case :Optional[str] = None ):
'''simple docstring'''
A_ : Any = self._tokenizer.model.save(snake_case , name=snake_case )
return tuple(snake_case )
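# Usage sketch of the candidate-batching call defined above (in the
# un-obfuscated transformers source this method is `batch_encode_candidates`;
# every candidate is padded to `max_length`):
from transformers import RealmTokenizerFast

tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
batch_text = [
    ["Hello world!", "Nice to meet you!"],   # candidates for example 1
    ["The cute cat.", "The adorable dog."],  # candidates for example 2
]
encoding = tokenizer.batch_encode_candidates(batch_text, max_length=10, return_tensors="pt")
print(encoding["input_ids"].shape)  # (batch_size, num_candidates, max_length) == (2, 2, 10)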
| 300
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class __magic_name__ :
"""simple docstring"""
def __init__( self :Tuple , snake_case :Optional[Any] , snake_case :Tuple=13 , snake_case :Dict=7 , snake_case :List[Any]=True , snake_case :List[Any]=True , snake_case :Dict=True , snake_case :Any=True , snake_case :Optional[int]=99 , snake_case :Any=32 , snake_case :Dict=2 , snake_case :int=4 , snake_case :Optional[int]=37 , snake_case :List[str]="gelu" , snake_case :List[Any]=0.1 , snake_case :Optional[Any]=0.1 , snake_case :Tuple=512 , snake_case :Tuple=16 , snake_case :Tuple=2 , snake_case :Optional[int]=0.02 , snake_case :str=3 , snake_case :Optional[int]=4 , snake_case :List[str]=None , snake_case :Tuple=1_000 , ):
'''simple docstring'''
A_ : str = parent
A_ : str = batch_size
A_ : str = seq_length
A_ : Any = is_training
A_ : Any = use_input_mask
A_ : str = use_token_type_ids
A_ : Tuple = use_labels
A_ : Optional[Any] = vocab_size
A_ : Dict = hidden_size
A_ : str = num_hidden_layers
A_ : Dict = num_attention_heads
A_ : str = intermediate_size
A_ : int = hidden_act
A_ : List[Any] = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : Optional[Any] = max_position_embeddings
A_ : List[Any] = type_vocab_size
A_ : Any = type_sequence_label_size
A_ : Dict = initializer_range
A_ : Any = num_labels
A_ : Optional[int] = num_choices
A_ : Optional[Any] = scope
A_ : Any = range_bbox
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
A_ : str = bbox[i, j, 3]
A_ : Union[str, Any] = bbox[i, j, 1]
A_ : List[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
A_ : Any = bbox[i, j, 2]
A_ : Tuple = bbox[i, j, 0]
A_ : int = t
A_ : int = tf.convert_to_tensor(snake_case )
A_ : Any = None
if self.use_input_mask:
A_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : str = None
if self.use_token_type_ids:
A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ : Dict = None
A_ : List[Any] = None
A_ : List[str] = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ : str = ids_tensor([self.batch_size] , self.num_choices )
A_ : int = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self :str , snake_case :Dict , snake_case :Union[str, Any] , snake_case :int , snake_case :int , snake_case :Union[str, Any] , snake_case :Tuple , snake_case :Optional[int] , snake_case :List[Any] ):
'''simple docstring'''
A_ : Any = TFLayoutLMModel(config=snake_case )
A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case )
A_ : str = model(snake_case , snake_case , token_type_ids=snake_case )
A_ : List[Any] = model(snake_case , snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Any , snake_case :List[Any] , snake_case :List[str] , snake_case :Optional[Any] , snake_case :Dict , snake_case :Any , snake_case :Union[str, Any] , snake_case :List[Any] ):
'''simple docstring'''
A_ : Optional[int] = TFLayoutLMForMaskedLM(config=snake_case )
A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :Dict , snake_case :Tuple , snake_case :Tuple , snake_case :List[str] , snake_case :Tuple , snake_case :str , snake_case :Optional[int] , snake_case :Any ):
'''simple docstring'''
A_ : Union[str, Any] = self.num_labels
A_ : int = TFLayoutLMForSequenceClassification(config=snake_case )
A_ : Optional[int] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict , snake_case :str , snake_case :Optional[Any] , snake_case :int , snake_case :Any , snake_case :Tuple , snake_case :List[str] , snake_case :Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = self.num_labels
A_ : str = TFLayoutLMForTokenClassification(config=snake_case )
A_ : Union[str, Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[str] , snake_case :Optional[int] , snake_case :Union[str, Any] , snake_case :List[Any] , snake_case :int , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ):
'''simple docstring'''
A_ : Optional[Any] = TFLayoutLMForQuestionAnswering(config=snake_case )
A_ : List[Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    bbox,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
__UpperCamelCase = (
{
'''feature-extraction''': TFLayoutLMModel,
'''fill-mask''': TFLayoutLMForMaskedLM,
'''text-classification''': TFLayoutLMForSequenceClassification,
'''token-classification''': TFLayoutLMForTokenClassification,
'''zero-shot''': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = True
__UpperCamelCase = 10
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : Tuple = TFLayoutLMModelTester(self )
A_ : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
@slow
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[str] = TFLayoutLMModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@unittest.skip("Onnx compliancy broke with TF 2.10" )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
pass
def __snake_case ( ) -> Optional[Any]:
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
A_ : int = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231
A_ : int = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
A_ : Union[str, Any] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
A_ : List[Any] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
A_ : Tuple = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
A_ : str = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" )
A_ , A_ , A_ , A_ , A_ : Tuple = prepare_layoutlm_batch_inputs()
# forward pass
A_ : Tuple = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case )
# test the sequence output on [0, :3, :3]
A_ : List[Any] = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case , atol=1e-3 ) )
# test the pooled output on [1, :3]
A_ : Optional[Any] = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , snake_case , atol=1e-3 ) )
@slow
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Union[str, Any] = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 )
A_ , A_ , A_ , A_ , A_ : Any = prepare_layoutlm_batch_inputs()
# forward pass
A_ : Dict = model(
input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
A_ : List[str] = outputs.loss
A_ : Union[str, Any] = (2,)
self.assertEqual(loss.shape , snake_case )
# test the shape of the logits
A_ : Tuple = outputs.logits
A_ : Tuple = (2, 2)
self.assertEqual(logits.shape , snake_case )
@slow
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : int = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 )
A_ , A_ , A_ , A_ , A_ : Optional[int] = prepare_layoutlm_batch_inputs()
# forward pass
A_ : Union[str, Any] = model(
input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
# test the shape of the logits
A_ : Dict = outputs.logits
A_ : List[Any] = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , snake_case )
@slow
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Optional[Any] = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" )
A_ , A_ , A_ , A_ , A_ : str = prepare_layoutlm_batch_inputs()
# forward pass
A_ : Union[str, Any] = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case )
# test the shape of the logits
A_ : Union[str, Any] = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , snake_case )
self.assertEqual(outputs.end_logits.shape , snake_case )
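# LayoutLM expects bounding boxes on a 0-1000 scale, as in the fixtures above.
# A common normalisation helper (a sketch; width/height are the page size in pixels):
def normalize_bbox(bbox: list, width: int, height: int) -> list:
    x0, y0, x1, y1 = bbox
    return [
        int(1000 * x0 / width),
        int(1000 * y0 / height),
        int(1000 * x1 / width),
        int(1000 * y1 / height),
    ]


print(normalize_bbox([15, 30, 45, 60], width=300, height=600))  # [50, 50, 150, 100]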
| 300
| 1
|
def longest_distance(graph: dict) -> None:
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
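# A return-based variant is handier for testing (same Kahn-style traversal;
# the value is the number of vertices on the longest path in the DAG):
def longest_distance_value(graph: dict) -> int:
    indegree = [0] * len(graph)
    long_dist = [1] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    queue = [i for i in range(len(indegree)) if indegree[i] == 0]
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            long_dist[x] = max(long_dist[x], long_dist[vertex] + 1)
            if indegree[x] == 0:
                queue.append(x)
    return max(long_dist)


assert longest_distance_value(graph) == 5  # path 0 -> 2 -> 5 -> 6 -> 7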
| 300
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '''
Human: <<task>>

Assistant: '''

DEFAULT_PROMPTS_REPO = '''huggingface-tools/default-prompts'''
PROMPT_FILES = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}


def download_prompt(prompt_or_repo_id: str, agent_name: str, mode: str = "run") -> str:
    """Downloads and caches the prompt from a repo and returns its contents (if necessary)."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
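# Usage sketch (requires network access to the default prompts dataset):
run_prompt = download_prompt(None, agent_name="MyAgent", mode="run")
# A literal prompt (anything containing whitespace) is passed through unchanged:
assert download_prompt("Do <<task>> now", agent_name="MyAgent") == "Do <<task>> now"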
| 300
| 1
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
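# Worked example: solve for the force (the zeroed argument) between
# 1 cm^2 plates separated by 1 micrometre; F = ħcπ²A / (240 d⁴) ≈ 1.3e-7 N.
result = casimir_force(force=0, area=1e-4, distance=1e-6)
print(result)  # {'force': ...}  roughly 1.3e-07 newtons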
| 300
|
def heaps(arr: list) -> list:
    """Return all permutations of `arr`, generated with Heap's iterative algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
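# Heap's algorithm produces each successive permutation with a single swap:
print(heaps([1, 2, 3]))
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]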
| 300
| 1
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : str = ['''model.decoder.embed_positions.weights''']
def __snake_case ( _lowerCAmelCase : Tuple ) -> str:
if "emb" in name:
A_ : int = name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
A_ : Dict = name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
A_ : int = name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
A_ : Optional[int] = name.replace("linear1" , "fc1" )
if "linear2" in name:
A_ : Optional[Any] = name.replace("linear2" , "fc2" )
if "norm1" in name:
A_ : Dict = name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
A_ : Union[str, Any] = name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
A_ : int = name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
A_ : Union[str, Any] = name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
A_ : List[str] = name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
A_ : List[Any] = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def __snake_case ( _lowerCAmelCase : OrderedDict , _lowerCAmelCase : int ) -> Tuple[Dict, Dict]:
A_ : List[Any] = list(state_dict.keys() )
A_ : Optional[int] = {}
for key in keys:
A_ : Any = state_dict.pop(_lowerCAmelCase )
A_ : Optional[Any] = rename_keys(_lowerCAmelCase )
if "in_proj_weight" in key:
# split fused qkv proj
A_ : Optional[int] = val[:hidden_size, :]
A_ : Any = val[hidden_size : 2 * hidden_size, :]
A_ : Union[str, Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
A_ : List[str] = val
else:
A_ : Union[str, Any] = val
return state_dict, enc_dec_proj_state_dict
def __snake_case ( _lowerCAmelCase : str ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
A_ : Tuple = 1024
A_ : Optional[int] = 24
A_ : List[str] = 16
elif checkpoint == "medium":
A_ : Tuple = 1536
A_ : Optional[int] = 48
A_ : str = 24
elif checkpoint == "large":
A_ : str = 2048
A_ : str = 48
A_ : Tuple = 32
else:
raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
A_ : Union[str, Any] = MusicgenDecoderConfig(
hidden_size=_lowerCAmelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=_lowerCAmelCase , num_attention_heads=_lowerCAmelCase , )
return config
@torch.no_grad()
def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any=None , _lowerCAmelCase : int=None , _lowerCAmelCase : Optional[int]="cpu" ) -> Tuple:
A_ : Optional[int] = MusicGen.get_pretrained(_lowerCAmelCase , device=_lowerCAmelCase )
A_ : Optional[Any] = decoder_config_from_checkpoint(_lowerCAmelCase )
A_ : Union[str, Any] = fairseq_model.lm.state_dict()
A_ , A_ : Dict = rename_state_dict(
_lowerCAmelCase , hidden_size=decoder_config.hidden_size )
A_ : Union[str, Any] = TaEncoderModel.from_pretrained("t5-base" )
A_ : str = EncodecModel.from_pretrained("facebook/encodec_32khz" )
A_ : Dict = MusicgenForCausalLM(_lowerCAmelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
A_ , A_ : Tuple = decoder.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
raise ValueError(f"Missing key(s) in state_dict: {missing_keys}" )
if len(_lowerCAmelCase ) > 0:
raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}" )
# init the composite model
A_ : List[Any] = MusicgenForConditionalGeneration(text_encoder=_lowerCAmelCase , audio_encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_lowerCAmelCase )
# check we can do a forward pass
A_ : Any = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
A_ : Tuple = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
A_ : str = model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
A_ : List[str] = AutoTokenizer.from_pretrained("t5-base" )
A_ : int = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
A_ : List[str] = MusicgenProcessor(feature_extractor=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
# set the appropriate bos/pad token ids
A_ : List[Any] = 2048
A_ : Optional[int] = 2048
# set other default generation config params
A_ : Tuple = int(30 * audio_encoder.config.frame_rate )
A_ : List[str] = True
A_ : Any = 3.0
if pytorch_dump_folder is not None:
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}" )
model.save_pretrained(_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
if repo_id:
logger.info(f"Pushing model {checkpoint} to {repo_id}" )
model.push_to_hub(_lowerCAmelCase )
processor.push_to_hub(_lowerCAmelCase )
if __name__ == "__main__":
_lowerCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
_lowerCAmelCase : List[Any] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
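# After conversion, the checkpoint can be used for text-to-audio generation.
# A sketch assuming the standard transformers MusicGen API; the path below is
# whatever was passed to --pytorch_dump_folder:
from transformers import AutoProcessor, MusicgenForConditionalGeneration

processor = AutoProcessor.from_pretrained("path/to/pytorch_dump_folder")
model = MusicgenForConditionalGeneration.from_pretrained("path/to/pytorch_dump_folder")

inputs = processor(text=["80s pop track with bassy drums"], padding=True, return_tensors="pt")
audio_values = model.generate(**inputs, max_new_tokens=256)  # (batch, channels, samples)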
| 300
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
_lowerCAmelCase : int = logging.get_logger(__name__)
_lowerCAmelCase : Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCAmelCase : List[Any] = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
_lowerCAmelCase : Any = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
__UpperCamelCase = RobertaTokenizer
def __init__( self :Dict , snake_case :List[str]=None , snake_case :List[Any]=None , snake_case :Union[str, Any]=None , snake_case :List[str]="replace" , snake_case :Tuple="<s>" , snake_case :Union[str, Any]="</s>" , snake_case :str="</s>" , snake_case :Union[str, Any]="<s>" , snake_case :int="<unk>" , snake_case :Tuple="<pad>" , snake_case :List[str]="<mask>" , snake_case :Any=False , snake_case :Union[str, Any]=True , **snake_case :Optional[int] , ):
'''simple docstring'''
super().__init__(
snake_case , snake_case , tokenizer_file=snake_case , errors=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , **snake_case , )
A_ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case ) != add_prefix_space:
A_ : Dict = getattr(snake_case , pre_tok_state.pop("type" ) )
A_ : Optional[int] = add_prefix_space
A_ : int = pre_tok_class(**snake_case )
A_ : Optional[int] = add_prefix_space
A_ : Optional[int] = "post_processor"
A_ : Dict = getattr(self.backend_tokenizer , snake_case , snake_case )
if tokenizer_component_instance:
A_ : Dict = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
A_ : List[Any] = tuple(state["sep"] )
if "cls" in state:
A_ : Optional[Any] = tuple(state["cls"] )
A_ : Tuple = False
if state.get("add_prefix_space" , snake_case ) != add_prefix_space:
A_ : List[Any] = add_prefix_space
A_ : Optional[int] = True
if state.get("trim_offsets" , snake_case ) != trim_offsets:
A_ : List[str] = trim_offsets
A_ : Any = True
if changes_to_apply:
A_ : Optional[Any] = getattr(snake_case , state.pop("type" ) )
A_ : Any = component_class(**snake_case )
setattr(self.backend_tokenizer , snake_case , snake_case )
@property
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :Dict ):
'''simple docstring'''
A_ : Dict = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else value
A_ : Any = value
def SCREAMING_SNAKE_CASE ( self :Dict , *snake_case :Tuple , **snake_case :Union[str, Any] ):
'''simple docstring'''
A_ : Any = kwargs.get("is_split_into_words" , snake_case )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE ( self :List[str] , *snake_case :str , **snake_case :Union[str, Any] ):
'''simple docstring'''
A_ : Any = kwargs.get("is_split_into_words" , snake_case )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :str , snake_case :Optional[str] = None ):
'''simple docstring'''
A_ : str = self._tokenizer.model.save(snake_case , name=snake_case )
return tuple(snake_case )
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :List[str] , snake_case :Optional[Any]=None ):
'''simple docstring'''
A_ : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :List[int] , snake_case :Optional[List[int]] = None ):
'''simple docstring'''
A_ : Any = [self.sep_token_id]
A_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
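# Hedged illustration (added, not part of the class above): the special-token
# layout implemented by the last two methods follows the RoBERTa convention and
# can be sanity-checked against a public checkpoint; `roberta-base` is used
# purely as an example.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("roberta-base")
    print(tok.build_inputs_with_special_tokens([10, 11]))  # <s> A </s>
    print(tok.build_inputs_with_special_tokens([10, 11], [12, 13]))  # <s> A </s></s> B </s>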
| 300
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = '''yolos'''
def __init__( self :Union[str, Any] , snake_case :Tuple=768 , snake_case :str=12 , snake_case :Optional[int]=12 , snake_case :List[Any]=3_072 , snake_case :Dict="gelu" , snake_case :Union[str, Any]=0.0 , snake_case :List[Any]=0.0 , snake_case :Union[str, Any]=0.02 , snake_case :Optional[int]=1e-12 , snake_case :str=[512, 864] , snake_case :str=16 , snake_case :List[Any]=3 , snake_case :Dict=True , snake_case :List[Any]=100 , snake_case :int=True , snake_case :List[Any]=False , snake_case :Optional[Any]=1 , snake_case :List[str]=5 , snake_case :Any=2 , snake_case :int=5 , snake_case :Any=2 , snake_case :Any=0.1 , **snake_case :Any , ):
'''simple docstring'''
super().__init__(**snake_case )
A_ : int = hidden_size
A_ : Optional[int] = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : Optional[Any] = hidden_act
A_ : List[str] = hidden_dropout_prob
A_ : str = attention_probs_dropout_prob
A_ : Optional[Any] = initializer_range
A_ : List[str] = layer_norm_eps
A_ : int = image_size
A_ : Dict = patch_size
A_ : Union[str, Any] = num_channels
A_ : str = qkv_bias
A_ : Dict = num_detection_tokens
A_ : Tuple = use_mid_position_embeddings
A_ : Union[str, Any] = auxiliary_loss
# Hungarian matcher
A_ : Union[str, Any] = class_cost
A_ : int = bbox_cost
A_ : Optional[Any] = giou_cost
# Loss coefficients
A_ : Optional[Any] = bbox_loss_coefficient
A_ : Optional[int] = giou_loss_coefficient
A_ : Tuple = eos_coefficient
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
return 1e-4
@property
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
return 12
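# Hedged usage sketch (added illustration): the configuration class above is
# exported from `transformers` as `YolosConfig`; the overridden values below are
# arbitrary, not recommendations.
if __name__ == "__main__":
    from transformers import YolosConfig

    config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
    print(config.hidden_size, config.num_detection_tokens)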
| 300
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCAmelCase : int = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_lowerCAmelCase : Tuple = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_lowerCAmelCase : int = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[List[List[str]]] , snake_case :List[List[str]] , snake_case :int = 1 , snake_case :int = 4 , ):
'''simple docstring'''
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=snake_case , hypotheses=snake_case , min_len=snake_case , max_len=snake_case )
}
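# Hedged sketch (added illustration): the metric above is a thin wrapper around
# NLTK's corpus-level GLEU, so the same score can be computed directly.
if __name__ == "__main__":
    hypotheses = [["the", "cat", "sat"]]
    list_of_references = [[["the", "cat", "sat", "down"]]]
    print(gleu_score.corpus_gleu(list_of_references, hypotheses, min_len=1, max_len=4))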
| 300
| 1
|
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = '''AutoTokenizer'''
__UpperCamelCase = ['''tokenizer''']
__UpperCamelCase = {
'''semantic_prompt''': 1,
'''coarse_prompt''': 2,
'''fine_prompt''': 2,
}
def __init__( self :Union[str, Any] , snake_case :int , snake_case :Any=None ):
'''simple docstring'''
super().__init__(snake_case )
A_ : Union[str, Any] = speaker_embeddings
@classmethod
def SCREAMING_SNAKE_CASE ( cls :int , snake_case :Optional[int] , snake_case :Optional[Any]="speaker_embeddings_path.json" , **snake_case :Dict ):
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
A_ : Tuple = get_file_from_repo(
snake_case , snake_case , subfolder=kwargs.pop("subfolder" , snake_case ) , cache_dir=kwargs.pop("cache_dir" , snake_case ) , force_download=kwargs.pop("force_download" , snake_case ) , proxies=kwargs.pop("proxies" , snake_case ) , resume_download=kwargs.pop("resume_download" , snake_case ) , local_files_only=kwargs.pop("local_files_only" , snake_case ) , use_auth_token=kwargs.pop("use_auth_token" , snake_case ) , revision=kwargs.pop("revision" , snake_case ) , )
if speaker_embeddings_path is None:
logger.warning(
f"`{os.path.join(snake_case , snake_case )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." )
A_ : List[str] = None
else:
with open(snake_case ) as speaker_embeddings_json:
A_ : Optional[int] = json.load(snake_case )
else:
A_ : str = None
A_ : Optional[int] = AutoTokenizer.from_pretrained(snake_case , **snake_case )
return cls(tokenizer=snake_case , speaker_embeddings=snake_case )
def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :str , snake_case :Any="speaker_embeddings_path.json" , snake_case :Tuple="speaker_embeddings" , snake_case :bool = False , **snake_case :Tuple , ):
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(snake_case , snake_case , "v2" ) , exist_ok=snake_case )
A_ : Optional[Any] = {}
A_ : List[str] = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
A_ : int = self._load_voice_preset(snake_case )
A_ : List[Any] = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] , snake_case , f"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=snake_case , )
A_ : Tuple = os.path.join(snake_case , f"{prompt_key}_{key}.npy" )
A_ : int = tmp_dict
with open(os.path.join(snake_case , snake_case ) , "w" ) as fp:
json.dump(snake_case , snake_case )
super().save_pretrained(snake_case , snake_case , **snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :str = None , **snake_case :List[str] ):
'''simple docstring'''
A_ : Tuple = self.speaker_embeddings[voice_preset]
A_ : Any = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." )
A_ : Any = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , snake_case ) , cache_dir=kwargs.pop("cache_dir" , snake_case ) , force_download=kwargs.pop("force_download" , snake_case ) , proxies=kwargs.pop("proxies" , snake_case ) , resume_download=kwargs.pop("resume_download" , snake_case ) , local_files_only=kwargs.pop("local_files_only" , snake_case ) , use_auth_token=kwargs.pop("use_auth_token" , snake_case ) , revision=kwargs.pop("revision" , snake_case ) , )
if path is None:
raise ValueError(
f"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." )
A_ : List[str] = np.load(snake_case )
return voice_preset_dict
def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :Optional[dict] = None ):
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f"Voice preset unrecognized, missing {key} as a key." )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
def __call__( self :Any , snake_case :Optional[Any]=None , snake_case :Optional[int]=None , snake_case :int="pt" , snake_case :Union[str, Any]=256 , snake_case :int=False , snake_case :List[Any]=True , snake_case :str=False , **snake_case :List[str] , ):
'''simple docstring'''
if voice_preset is not None and not isinstance(snake_case , snake_case ):
if (
isinstance(snake_case , snake_case )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
A_ : int = self._load_voice_preset(snake_case )
else:
if isinstance(snake_case , snake_case ) and not voice_preset.endswith(".npz" ):
A_ : Tuple = voice_preset + ".npz"
A_ : List[Any] = np.load(snake_case )
if voice_preset is not None:
self._validate_voice_preset_dict(snake_case , **snake_case )
A_ : List[Any] = BatchFeature(data=snake_case , tensor_type=snake_case )
A_ : int = self.tokenizer(
snake_case , return_tensors=snake_case , padding="max_length" , max_length=snake_case , return_attention_mask=snake_case , return_token_type_ids=snake_case , add_special_tokens=snake_case , **snake_case , )
if voice_preset is not None:
A_ : Optional[int] = voice_preset
return encoded_text
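# Hedged usage sketch (added illustration): the class above is exported from
# `transformers` as `BarkProcessor`; the checkpoint and voice preset names are
# examples from the Bark documentation, not requirements.
if __name__ == "__main__":
    from transformers import BarkProcessor

    processor = BarkProcessor.from_pretrained("suno/bark-small")
    inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
    print(inputs["input_ids"].shape)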
| 300
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ) -> str:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] ) -> Optional[int]:
A_ : Tuple = tmp_path / "cache"
A_ : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : Optional[Any] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ) -> str:
A_ : List[Any] = tmp_path / "cache"
A_ : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : int = features.copy() if features else default_expected_features
A_ : str = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ : Union[str, Any] = ParquetDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any ) -> Optional[Any]:
A_ : Dict = tmp_path / "cache"
A_ : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : Optional[int] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , split=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] ) -> List[str]:
if issubclass(_lowerCAmelCase , _lowerCAmelCase ):
A_ : int = parquet_path
elif issubclass(_lowerCAmelCase , _lowerCAmelCase ):
A_ : Optional[int] = [parquet_path]
A_ : Optional[int] = tmp_path / "cache"
A_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : Optional[int] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]=("train",) ) -> Tuple:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
for split in splits:
A_ : List[str] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict ) -> Optional[int]:
A_ : Optional[Any] = tmp_path / "cache"
A_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : Union[str, Any] = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __snake_case ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : str ) -> Tuple:
A_ : Optional[Any] = tmp_path / "cache"
A_ : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : List[str] = features.copy() if features else default_expected_features
A_ : Tuple = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ : Optional[int] = ParquetDatasetReader({"train": parquet_path} , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Any ) -> Union[str, Any]:
if split:
A_ : Any = {split: parquet_path}
else:
A_ : Optional[Any] = "train"
A_ : str = {"train": parquet_path, "test": parquet_path}
A_ : Any = tmp_path / "cache"
A_ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : Dict = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] ) -> Dict:
A_ : List[str] = ParquetDatasetWriter(_lowerCAmelCase , tmp_path / "foo.parquet" )
assert writer.write() > 0
A_ : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" )
A_ : Dict = pf.read()
assert dataset.data.table == output_table
def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ) -> List[Any]:
A_ : Tuple = str(shared_datadir / "test_image_rgb.jpg" )
A_ : int = {"image": [image_path]}
A_ : Optional[Any] = Features({"image": Image()} )
A_ : Union[str, Any] = Dataset.from_dict(_lowerCAmelCase , features=_lowerCAmelCase )
A_ : Tuple = ParquetDatasetWriter(_lowerCAmelCase , tmp_path / "foo.parquet" )
assert writer.write() > 0
A_ : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
A_ : int = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=_lowerCAmelCase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ) -> Any:
assert get_writer_batch_size(_lowerCAmelCase ) == expected
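# Hedged round-trip sketch (added illustration) of what the tests above
# exercise, using only the public APIs already imported in this module; the
# file name is arbitrary.
if __name__ == "__main__":
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    ParquetDatasetWriter(ds, "roundtrip.parquet").write()
    reloaded = ParquetDatasetReader("roundtrip.parquet").read()
    print(reloaded.column_names, reloaded.num_rows)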
| 300
| 1
|
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
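# Hedged cross-check (added illustration): the DP above satisfies the
# tetranacci-style recurrence
#   ways(n) = ways(n-1) + ways(n-2) + ways(n-3) + ways(n-4), ways(0) = ways(1) = 1,
# since a row starts either with a unit square or with a tile of length 2, 3
# or 4. Hand-checked small values: ways(2) = 2, ways(3) = 4, ways(4) = 8,
# matching solution(2), solution(3) and solution(4).
def ways_by_recurrence(length: int) -> int:
    w = [1, 1, 2, 4]  # ways(0) .. ways(3)
    for n in range(4, length + 1):
        w.append(w[-1] + w[-2] + w[-3] + w[-4])
    return w[length]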
if __name__ == "__main__":
print(F'''{solution() = }''')
| 300
|
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any]="shi-labs/oneformer_demo" ) -> int:
with open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) as f:
A_ : Optional[int] = json.load(_lowerCAmelCase )
A_ : Union[str, Any] = {}
A_ : Tuple = []
A_ : Optional[Any] = []
for key, info in class_info.items():
A_ : Tuple = info["name"]
class_names.append(info["name"] )
if info["isthing"]:
thing_ids.append(int(_lowerCAmelCase ) )
A_ : Optional[Any] = thing_ids
A_ : int = class_names
return metadata
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self :List[Any] , snake_case :List[str] , snake_case :int=7 , snake_case :Optional[int]=3 , snake_case :Union[str, Any]=30 , snake_case :Tuple=400 , snake_case :List[Any]=None , snake_case :Optional[Any]=True , snake_case :Tuple=True , snake_case :Dict=[0.5, 0.5, 0.5] , snake_case :Any=[0.5, 0.5, 0.5] , snake_case :Optional[int]=10 , snake_case :Tuple=False , snake_case :Optional[int]=255 , snake_case :Optional[Any]="shi-labs/oneformer_demo" , snake_case :Optional[Any]="ade20k_panoptic.json" , snake_case :Optional[int]=10 , ):
'''simple docstring'''
A_ : Tuple = parent
A_ : List[str] = batch_size
A_ : Optional[int] = num_channels
A_ : Tuple = min_resolution
A_ : List[Any] = max_resolution
A_ : Union[str, Any] = do_resize
A_ : Any = {"shortest_edge": 32, "longest_edge": 1_333} if size is None else size
A_ : Tuple = do_normalize
A_ : List[str] = image_mean
A_ : List[Any] = image_std
A_ : Union[str, Any] = class_info_file
A_ : List[Any] = prepare_metadata(snake_case , snake_case )
A_ : Tuple = num_text
A_ : str = repo_path
# for the post_process_functions
A_ : Any = 2
A_ : int = 10
A_ : Optional[int] = 10
A_ : Tuple = 3
A_ : Tuple = 4
A_ : str = num_labels
A_ : int = do_reduce_labels
A_ : List[Any] = ignore_index
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Any , snake_case :Any=False ):
'''simple docstring'''
if not batched:
A_ : List[str] = image_inputs[0]
if isinstance(snake_case , Image.Image ):
A_ , A_ : Dict = image.size
else:
A_ , A_ : Tuple = image.shape[1], image.shape[2]
if w < h:
A_ : str = int(self.size["shortest_edge"] * h / w )
A_ : Any = self.size["shortest_edge"]
elif w > h:
A_ : Optional[int] = self.size["shortest_edge"]
A_ : List[str] = int(self.size["shortest_edge"] * w / h )
else:
A_ : List[str] = self.size["shortest_edge"]
A_ : Optional[Any] = self.size["shortest_edge"]
else:
A_ : Tuple = []
for image in image_inputs:
A_ , A_ : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            A_ : Tuple = max(expected_values , key=lambda item : item[0] )[0]
            A_ : Union[str, Any] = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class __magic_name__ ( lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
__UpperCamelCase = image_processing_class
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : Union[str, Any] = OneFormerImageProcessorTester(self )
@property
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case , "image_mean" ) )
self.assertTrue(hasattr(snake_case , "image_std" ) )
self.assertTrue(hasattr(snake_case , "do_normalize" ) )
self.assertTrue(hasattr(snake_case , "do_resize" ) )
self.assertTrue(hasattr(snake_case , "size" ) )
self.assertTrue(hasattr(snake_case , "ignore_index" ) )
self.assertTrue(hasattr(snake_case , "class_info_file" ) )
self.assertTrue(hasattr(snake_case , "num_text" ) )
self.assertTrue(hasattr(snake_case , "repo_path" ) )
self.assertTrue(hasattr(snake_case , "metadata" ) )
self.assertTrue(hasattr(snake_case , "do_reduce_labels" ) )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , Image.Image )
# Test not batched input
A_ : str = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
A_ , A_ : str = self.image_processing_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ , A_ : Optional[Any] = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case )
A_ : List[str] = image_processor(
snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case , numpify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , np.ndarray )
# Test not batched input
A_ : List[str] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
A_ , A_ : List[str] = self.image_processing_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ , A_ : int = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case )
A_ : Optional[Any] = image_processor(
snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case , torchify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , torch.Tensor )
# Test not batched input
A_ : Any = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
A_ , A_ : Tuple = self.image_processing_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ , A_ : Tuple = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case )
A_ : Any = image_processor(
snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict=False , snake_case :str=False , snake_case :Dict="np" ):
'''simple docstring'''
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
A_ : Tuple = self.image_processing_tester.num_labels
A_ : str = None
A_ : Tuple = None
A_ : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case )
if with_segmentation_maps:
A_ : List[str] = num_labels
if is_instance_map:
A_ : List[str] = list(range(snake_case ) ) * 2
A_ : int = dict(enumerate(snake_case ) )
A_ : List[str] = [
                np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
]
if segmentation_type == "pil":
A_ : int = [Image.fromarray(snake_case ) for annotation in annotations]
A_ : List[str] = image_processor(
snake_case , ["semantic"] * len(snake_case ) , snake_case , return_tensors="pt" , instance_id_to_semantic_id=snake_case , pad_and_return_pixel_mask=snake_case , )
return inputs
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
def common(snake_case :Dict=False , snake_case :Optional[int]=None ):
A_ : Tuple = self.comm_get_image_processor_inputs(
with_segmentation_maps=snake_case , is_instance_map=snake_case , segmentation_type=snake_case )
A_ : Optional[Any] = inputs["mask_labels"]
A_ : List[Any] = inputs["class_labels"]
A_ : Optional[Any] = inputs["pixel_values"]
A_ : int = inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(snake_case , snake_case , snake_case ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(snake_case ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=snake_case )
common(is_instance_map=snake_case , segmentation_type="pil" )
common(is_instance_map=snake_case , segmentation_type="pil" )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Any = np.zeros((20, 50) )
A_ : List[str] = 1
A_ : int = 1
A_ : Optional[Any] = 1
A_ : Any = binary_mask_to_rle(snake_case )
self.assertEqual(len(snake_case ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Union[str, Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
A_ : Any = self.image_processing_tester.get_fake_oneformer_outputs()
        A_ : int = image_processor.post_process_semantic_segmentation(snake_case )
self.assertEqual(len(snake_case ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
A_ : Optional[int] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        A_ : List[Any] = image_processor.post_process_semantic_segmentation(snake_case , target_sizes=snake_case )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : List[str] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
A_ : str = self.image_processing_tester.get_fake_oneformer_outputs()
A_ : Optional[Any] = image_processor.post_process_instance_segmentation(snake_case , threshold=0 )
self.assertTrue(len(snake_case ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , snake_case )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Tuple = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
A_ : List[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
A_ : Optional[Any] = image_processor.post_process_panoptic_segmentation(snake_case , threshold=0 )
self.assertTrue(len(snake_case ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , snake_case )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 300
| 1
|
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainer's args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and a second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second has 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a training run:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each experiment multiple times, e.g., 3 times using --repeat-times 3, and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
_lowerCAmelCase : Any = float('''nan''')
class __magic_name__ :
"""simple docstring"""
def __init__( self :str , snake_case :List[str] ):
'''simple docstring'''
A_ : Any = sys.stdout
A_ : Optional[int] = open(snake_case , "a" )
def __getattr__( self :Optional[int] , snake_case :Optional[Any] ):
'''simple docstring'''
return getattr(self.stdout , snake_case )
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :Dict ):
'''simple docstring'''
self.stdout.write(snake_case )
# strip tqdm codes
self.file.write(re.sub(R"^.*\r" , "" , snake_case , 0 , re.M ) )
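# Usage sketch for Tee (illustrative): mirror everything printed to a log file
# while still writing to the console; main() below applies this pattern as,
# conceptually, `sys.stdout = Tee(report_fn)`.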
def __snake_case ( _lowerCAmelCase : List[Any]=80 , _lowerCAmelCase : List[str]=False ) -> Tuple:
A_ : Tuple = []
# deal with critical env vars
A_ : Any = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
A_ : str = os.environ.get(_lowerCAmelCase , _lowerCAmelCase )
if val is not None:
cmd.append(f"{key}={val}" )
# python executable (not always needed if the script is executable)
A_ : List[str] = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(_lowerCAmelCase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
A_ : str = []
A_ : Optional[int] = ""
while len(_lowerCAmelCase ) > 0:
current_line += f"{cmd.pop(0 )} "
if len(_lowerCAmelCase ) == 0 or len(_lowerCAmelCase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(_lowerCAmelCase )
A_ : int = ""
return "\\\n".join(_lowerCAmelCase )
def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : str ) -> List[Any]:
# unwrap multi-line input
A_ : Union[str, Any] = re.sub(r"[\\\n]+" , " " , args.base_cmd )
# remove --output_dir if any and set our own
A_ : Dict = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd )
args.base_cmd += f" --output_dir {output_dir}"
# ensure we have --overwrite_output_dir
A_ : List[str] = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Any ) -> List[str]:
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} , )
A_ : str = subprocess.run(_lowerCAmelCase , capture_output=_lowerCAmelCase , text=_lowerCAmelCase )
if verbose:
print("STDOUT" , result.stdout )
print("STDERR" , result.stderr )
# save the streams
A_ : List[str] = variation.replace(" " , "-" )
with open(Path(_lowerCAmelCase ) / f"log.{prefix}.stdout.txt" , "w" ) as f:
f.write(result.stdout )
with open(Path(_lowerCAmelCase ) / f"log.{prefix}.stderr.txt" , "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(f"{output_dir}/all_results.json" , "r" , encoding="utf-8" ) as f:
A_ : str = json.load(_lowerCAmelCase )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , ) -> Any:
A_ : int = []
A_ : List[str] = []
A_ : Optional[int] = f"{id}: {variation:<{longest_variation_len}}"
A_ : Tuple = f"{preamble}: "
A_ : List[Any] = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(_lowerCAmelCase ) , desc=_lowerCAmelCase , leave=_lowerCAmelCase ):
A_ : Optional[Any] = process_run_single(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A_ : Tuple = single_run_metrics[target_metric_key]
if not math.isnan(_lowerCAmelCase ):
metrics.append(_lowerCAmelCase )
results.append(_lowerCAmelCase )
outcome += "✓"
else:
outcome += "✘"
A_ : Any = f"\33[2K\r{outcome}"
if len(_lowerCAmelCase ) > 0:
A_ : Any = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
A_ : List[Any] = round(mean_metrics[target_metric_key] , 2 )
A_ : Optional[int] = f"{outcome} {mean_target}"
if len(_lowerCAmelCase ) > 1:
results_str += f" {tuple(round(_lowerCAmelCase , 2 ) for x in results )}"
print(_lowerCAmelCase )
A_ : int = variation
return mean_metrics
else:
print(_lowerCAmelCase )
return {variation_key: variation, target_metric_key: nan}
def __snake_case ( ) -> Union[str, Any]:
A_ : Optional[int] = torch.cuda.get_device_properties(torch.device("cuda" ) )
return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def __snake_case ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] ) -> List[Any]:
A_ : int = pd.DataFrame(_lowerCAmelCase )
A_ : Optional[int] = "variation"
A_ : str = "diff_%"
A_ : Dict = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
A_ : Union[str, Any] = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(_lowerCAmelCase ):
# as a fallback, use the minimal value as the sentinel
A_ : Union[str, Any] = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(_lowerCAmelCase ):
A_ : Any = df.apply(
            lambda r : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis="columns" , )
# re-order columns
A_ : Optional[Any] = [variation_key, target_metric_key, diff_key, *report_metric_keys]
A_ : Optional[Any] = df.reindex(_lowerCAmelCase , axis="columns" ) # reorder cols
# capitalize
A_ : int = df.rename(str.capitalize , axis="columns" )
# make the cols as narrow as possible
    A_ : List[Any] = df.rename(lambda c : c.replace("_" , "<br>" ) , axis="columns" )
    A_ : Optional[int] = df.rename(lambda c : c.replace("_" , "\n" ) , axis="columns" )
A_ : Optional[int] = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=_lowerCAmelCase , floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=_lowerCAmelCase , floatfmt=".2f" )]
print("\n\n".join(_lowerCAmelCase ) )
def __snake_case ( ) -> str:
A_ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Base cmd" , )
parser.add_argument(
"--variations" , default=_lowerCAmelCase , type=_lowerCAmelCase , nargs="+" , required=_lowerCAmelCase , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
parser.add_argument(
"--base-variation" , default=_lowerCAmelCase , type=_lowerCAmelCase , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
parser.add_argument(
"--target-metric-key" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
parser.add_argument(
"--report-metric-keys" , default="" , type=_lowerCAmelCase , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples" , )
parser.add_argument(
"--repeat-times" , default=1 , type=_lowerCAmelCase , help="How many times to re-run each variation - an average will be reported" , )
parser.add_argument(
"--output_dir" , default="output_benchmark" , type=_lowerCAmelCase , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
parser.add_argument(
"--verbose" , default=_lowerCAmelCase , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
A_ : Any = parser.parse_args()
A_ : Optional[int] = args.output_dir
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
A_ : Optional[int] = get_base_command(_lowerCAmelCase , _lowerCAmelCase )
# split each dimension into its --foo variations
    A_ : Optional[Any] = [list(map(str.strip , re.split(r"\|" , x ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
A_ : List[str] = list(map(str.strip , map(" ".join , itertools.product(*_lowerCAmelCase ) ) ) )
    A_ : str = max(len(x ) for x in variations )
# split wanted keys
A_ : Tuple = args.report_metric_keys.split()
# capture prints into a log file for convenience
A_ : Tuple = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt"
print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt" )
print(f"and this script's output is also piped into {report_fn}" )
A_ : Dict = Tee(_lowerCAmelCase )
print(f"\n*** Running {len(_lowerCAmelCase )} benchmarks:" )
print(f"Base command: {' '.join(_lowerCAmelCase )}" )
A_ : Dict = "variation"
A_ : int = []
for id, variation in enumerate(tqdm(_lowerCAmelCase , desc="Total completion: " , leave=_lowerCAmelCase ) ):
A_ : Any = base_cmd + variation.split()
results.append(
process_run(
id + 1 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.target_metric_key , _lowerCAmelCase , args.repeat_times , _lowerCAmelCase , args.verbose , ) )
process_results(_lowerCAmelCase , args.target_metric_key , _lowerCAmelCase , args.base_variation , _lowerCAmelCase )
if __name__ == "__main__":
main()
| 300
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = '''data2vec-vision'''
def __init__( self :int , snake_case :Optional[int]=768 , snake_case :Any=12 , snake_case :Any=12 , snake_case :Tuple=3_072 , snake_case :Any="gelu" , snake_case :Tuple=0.0 , snake_case :int=0.0 , snake_case :Any=0.02 , snake_case :str=1e-12 , snake_case :List[str]=224 , snake_case :Dict=16 , snake_case :int=3 , snake_case :int=False , snake_case :str=False , snake_case :List[Any]=False , snake_case :Optional[Any]=False , snake_case :Tuple=0.1 , snake_case :Optional[Any]=0.1 , snake_case :Any=True , snake_case :Optional[Any]=[3, 5, 7, 11] , snake_case :Dict=[1, 2, 3, 6] , snake_case :int=True , snake_case :List[Any]=0.4 , snake_case :Any=256 , snake_case :Union[str, Any]=1 , snake_case :Union[str, Any]=False , snake_case :Any=255 , **snake_case :int , ):
'''simple docstring'''
super().__init__(**snake_case )
A_ : Dict = hidden_size
A_ : Tuple = num_hidden_layers
A_ : List[str] = num_attention_heads
A_ : Any = intermediate_size
A_ : Optional[Any] = hidden_act
A_ : Any = hidden_dropout_prob
A_ : List[str] = attention_probs_dropout_prob
A_ : Optional[Any] = initializer_range
A_ : List[str] = layer_norm_eps
A_ : str = image_size
A_ : Optional[int] = patch_size
A_ : int = num_channels
A_ : Optional[Any] = use_mask_token
A_ : Optional[Any] = use_absolute_position_embeddings
A_ : Optional[int] = use_relative_position_bias
A_ : Dict = use_shared_relative_position_bias
A_ : Any = layer_scale_init_value
A_ : Optional[Any] = drop_path_rate
A_ : Dict = use_mean_pooling
# decode head attributes (semantic segmentation)
A_ : Tuple = out_indices
A_ : Optional[Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
A_ : str = use_auxiliary_head
A_ : List[Any] = auxiliary_loss_weight
A_ : List[str] = auxiliary_channels
A_ : Dict = auxiliary_num_convs
A_ : List[str] = auxiliary_concat_input
A_ : Optional[int] = semantic_loss_ignore_index
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
return 1e-4
| 300
| 1
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : float | Decimal , _lowerCAmelCase : float = 10**-10 ) -> float:
A_ : Dict = a
while True:
A_ : Union[str, Any] = Decimal(_lowerCAmelCase ) - (
Decimal(eval(_lowerCAmelCase ) ) / Decimal(eval(str(diff(_lowerCAmelCase ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(_lowerCAmelCase ) ) < precision: # noqa: S307
return float(_lowerCAmelCase )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
    # Find value of e (root of log(x) - 1 = 0)
print(F'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
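    # For contrast, the same Newton-Raphson update written with plain callables
    # instead of eval'd strings (a minimal sketch, independent of the helper
    # above): x_{n+1} = x_n - f(x_n) / f'(x_n)
    def newton_step(f, f_prime, x):
        return x - f(x) / f_prime(x)

    x = 3.0
    for _ in range(20):
        x = newton_step(lambda t: t * t - 5, lambda t: 2 * t, x)
    print(f"sqrt(5) via callables: {x}")  # converges to ~2.2360679...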
| 300
|
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = ['''input_features''', '''attention_mask''']
def __init__( self :int , snake_case :int=80 , snake_case :Optional[int]=16_000 , snake_case :Tuple=0.0 , snake_case :Optional[int]=10 , snake_case :Optional[Any]=25 , snake_case :Dict="hamming_window" , snake_case :Tuple=32768.0 , snake_case :str=0.97 , snake_case :List[str]=1.0 , snake_case :Dict=True , snake_case :str=True , snake_case :Optional[Any]=False , **snake_case :Union[str, Any] , ):
'''simple docstring'''
super().__init__(feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , **snake_case )
A_ : Union[str, Any] = feature_size
A_ : int = sampling_rate
A_ : str = padding_value
A_ : int = hop_length
A_ : List[str] = win_length
A_ : Any = frame_signal_scale
A_ : str = preemphasis_coeff
A_ : List[str] = mel_floor
A_ : str = normalize_means
A_ : Any = normalize_vars
A_ : Optional[Any] = win_function
A_ : Dict = return_attention_mask
A_ : List[str] = win_length * sampling_rate // 1_000
A_ : List[str] = hop_length * sampling_rate // 1_000
A_ : List[str] = optimal_fft_length(self.sample_size )
A_ : str = (self.n_fft // 2) + 1
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :np.array ):
'''simple docstring'''
if self.win_function == "hamming_window":
A_ : Dict = window_function(window_length=self.sample_size , name=self.win_function , periodic=snake_case )
else:
A_ : List[str] = window_function(window_length=self.sample_size , name=self.win_function )
A_ : Optional[int] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
A_ : Tuple = spectrogram(
one_waveform * self.frame_signal_scale , window=snake_case , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=snake_case , preemphasis=self.preemphasis_coeff , mel_filters=snake_case , mel_floor=self.mel_floor , log_mel="log" , )
return msfc_features.T
def SCREAMING_SNAKE_CASE ( self :int , snake_case :Any , snake_case :Union[str, Any] , snake_case :str ):
'''simple docstring'''
if self.normalize_means:
A_ : int = x[:input_length].mean(axis=0 )
A_ : Any = np.subtract(snake_case , snake_case )
if self.normalize_vars:
A_ : List[Any] = x[:input_length].std(axis=0 )
A_ : Optional[int] = np.divide(snake_case , snake_case )
if input_length < x.shape[0]:
A_ : Optional[int] = padding_value
# make sure array is in float32
A_ : Union[str, Any] = x.astype(np.floataa )
return x
def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[np.ndarray] , snake_case :Optional[np.ndarray] = None ):
'''simple docstring'''
A_ : str = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(snake_case , snake_case , self.padding_value ) for x, n in zip(snake_case , snake_case )]
def __call__( self :int , snake_case :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , snake_case :Union[bool, str, PaddingStrategy] = False , snake_case :Optional[int] = None , snake_case :bool = False , snake_case :Optional[int] = None , snake_case :Optional[bool] = None , snake_case :Optional[Union[str, TensorType]] = None , snake_case :Optional[int] = None , **snake_case :Dict , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
A_ : Optional[int] = isinstance(snake_case , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
A_ : Optional[Any] = is_batched_numpy or (
isinstance(snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A_ : List[Any] = [np.asarray(snake_case , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(snake_case , np.ndarray ):
A_ : int = np.asarray(snake_case , dtype=np.floataa )
elif isinstance(snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A_ : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A_ : Tuple = [raw_speech]
# extract fbank features
A_ : int = [self._extract_mfsc_features(snake_case ) for one_waveform in raw_speech]
# convert into correct format for padding
A_ : Union[str, Any] = BatchFeature({"input_features": features} )
A_ : str = self.pad(
snake_case , padding=snake_case , max_length=snake_case , truncation=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , **snake_case , )
# make sure list is in array format
A_ : Optional[int] = padded_inputs.get("input_features" )
if isinstance(input_features[0] , snake_case ):
A_ : Union[str, Any] = [np.asarray(snake_case , dtype=np.floataa ) for feature in input_features]
A_ : Dict = padded_inputs.get("attention_mask" )
if attention_mask is not None:
A_ : Any = [np.asarray(snake_case , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
A_ : Dict = (
np.array(snake_case , dtype=np.intaa )
if self._get_padding_strategies(snake_case , max_length=snake_case ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
A_ : Optional[int] = self.normalize(
padded_inputs["input_features"] , attention_mask=snake_case )
if return_tensors is not None:
A_ : Dict = padded_inputs.convert_to_tensors(snake_case )
return padded_inputs
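# The `_normalize_one` step above is per-utterance cepstral mean/variance
# normalization (CMVN) over the unpadded frames only; a standalone numpy
# sketch of the same idea (shapes and the epsilon are illustrative assumptions,
# not part of the class above):
def _cmvn_sketch(x: np.ndarray, input_length: int, padding_value: float = 0.0) -> np.ndarray:
    mean = x[:input_length].mean(axis=0)  # statistics from valid frames only
    std = x[:input_length].std(axis=0)
    x = (x - mean) / (std + 1e-10)  # epsilon added here; the class divides directly
    x[input_length:] = padding_value  # padded frames stay at the padding value
    return x.astype(np.float32)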
| 300
| 1
|
import os
from datetime import datetime as dt
from github import Github
_lowerCAmelCase : str = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def __snake_case ( ) -> int:
A_ : str = Github(os.environ["GITHUB_TOKEN"] )
A_ : List[str] = g.get_repo("huggingface/accelerate" )
A_ : int = repo.get_issues(state="open" )
for issue in open_issues:
        A_ : int = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
A_ : Optional[int] = comments[0] if len(_lowerCAmelCase ) > 0 else None
A_ : Optional[int] = dt.utcnow()
A_ : List[Any] = (current_time - issue.updated_at).days
A_ : Optional[int] = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="closed" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
| 300
|
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self :List[Any] , snake_case :int , snake_case :int , snake_case :Optional[int] = None , snake_case :int = 50_257 , snake_case :int = 1_024 , snake_case :int = 768 , snake_case :int = 12 , snake_case :int = 12 , snake_case :Optional[int] = None , snake_case :str = "gelu_new" , snake_case :float = 0.1 , snake_case :float = 0.1 , snake_case :float = 0.1 , snake_case :float = 1e-5 , snake_case :float = 0.02 , snake_case :bool = True , snake_case :bool = True , snake_case :bool = False , snake_case :bool = False , ):
'''simple docstring'''
super().__init__()
A_ : Tuple = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
f" `n_embd`: {n_embd} are not equal." )
A_ : List[Any] = prefix_inner_dim
A_ : Union[str, Any] = prefix_hidden_dim
A_ : List[str] = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
A_ : List[Any] = (
nn.Linear(self.prefix_hidden_dim , snake_case ) if self.prefix_hidden_dim is not None else nn.Identity()
)
A_ : List[Any] = GPTaConfig(
vocab_size=snake_case , n_positions=snake_case , n_embd=snake_case , n_layer=snake_case , n_head=snake_case , n_inner=snake_case , activation_function=snake_case , resid_pdrop=snake_case , embd_pdrop=snake_case , attn_pdrop=snake_case , layer_norm_epsilon=snake_case , initializer_range=snake_case , scale_attn_weights=snake_case , use_cache=snake_case , scale_attn_by_inverse_layer_idx=snake_case , reorder_and_upcast_attn=snake_case , )
A_ : Optional[Any] = GPTaLMHeadModel(snake_case )
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.Tensor , snake_case :torch.Tensor , snake_case :Optional[torch.Tensor] = None , snake_case :Optional[torch.Tensor] = None , ):
'''simple docstring'''
A_ : Any = self.transformer.transformer.wte(snake_case )
A_ : str = self.encode_prefix(snake_case )
A_ : Union[str, Any] = self.decode_prefix(snake_case )
A_ : int = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
A_ : Dict = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
A_ : int = torch.cat((dummy_token, input_ids) , dim=1 )
A_ : Union[str, Any] = self.transformer(inputs_embeds=snake_case , labels=snake_case , attention_mask=snake_case )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def SCREAMING_SNAKE_CASE ( self :str , snake_case :int , snake_case :torch.device ):
'''simple docstring'''
return torch.zeros(snake_case , self.prefix_length , dtype=torch.intaa , device=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :int ):
'''simple docstring'''
return self.encode_prefix(snake_case )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Dict , snake_case :Optional[int] , snake_case :Any ):
'''simple docstring'''
A_ : Any = torch.split(snake_case , 1 , dim=0 )
A_ : Optional[int] = []
A_ : Union[str, Any] = []
for feature in features:
A_ : Tuple = self.decode_prefix(feature.to(snake_case ) ) # back to the clip feature
# Only support beam search for now
A_ , A_ : Dict = self.generate_beam(
input_embeds=snake_case , device=snake_case , eos_token_id=snake_case )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
A_ : int = torch.stack(snake_case )
A_ : int = torch.stack(snake_case )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :int=None , snake_case :str=None , snake_case :int=None , snake_case :int = 5 , snake_case :int = 67 , snake_case :float = 1.0 , snake_case :Optional[int] = None , ):
'''simple docstring'''
A_ : Optional[Any] = eos_token_id
A_ : List[Any] = None
A_ : List[Any] = None
A_ : str = torch.ones(snake_case , device=snake_case , dtype=torch.int )
A_ : Any = torch.zeros(snake_case , device=snake_case , dtype=torch.bool )
if input_embeds is not None:
A_ : Any = input_embeds
else:
A_ : Optional[Any] = self.transformer.transformer.wte(snake_case )
for i in range(snake_case ):
A_ : Optional[Any] = self.transformer(inputs_embeds=snake_case )
A_ : str = outputs.logits
A_ : int = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
A_ : List[str] = logits.softmax(-1 ).log()
if scores is None:
A_ , A_ : Union[str, Any] = logits.topk(snake_case , -1 )
A_ : Tuple = generated.expand(snake_case , *generated.shape[1:] )
A_ , A_ : str = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
A_ : Union[str, Any] = next_tokens
else:
A_ : List[str] = tokens.expand(snake_case , *tokens.shape[1:] )
A_ : Union[str, Any] = torch.cat((tokens, next_tokens) , dim=1 )
else:
A_ : List[str] = -float(np.inf )
A_ : List[Any] = 0
A_ : Union[str, Any] = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
A_ : Optional[Any] = scores_sum / seq_lengths[:, None]
A_ , A_ : List[str] = scores_sum_average.view(-1 ).topk(snake_case , -1 )
A_ : str = next_tokens // scores_sum.shape[1]
A_ : Union[str, Any] = seq_lengths[next_tokens_source]
A_ : Optional[int] = next_tokens % scores_sum.shape[1]
A_ : Tuple = next_tokens.unsqueeze(1 )
A_ : Tuple = tokens[next_tokens_source]
A_ : Dict = torch.cat((tokens, next_tokens) , dim=1 )
A_ : Dict = generated[next_tokens_source]
A_ : Union[str, Any] = scores_sum_average * seq_lengths
A_ : Optional[int] = is_stopped[next_tokens_source]
A_ : Tuple = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
A_ : Union[str, Any] = torch.cat((generated, next_token_embed) , dim=1 )
A_ : Any = is_stopped + next_tokens.eq(snake_case ).squeeze()
if is_stopped.all():
break
A_ : int = scores / seq_lengths
A_ : str = scores.argsort(descending=snake_case )
# tokens tensors are already padded to max_seq_length
A_ : Dict = [tokens[i] for i in order]
A_ : int = torch.stack(snake_case , dim=0 )
A_ : List[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
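# A minimal sketch of the per-step beam bookkeeping used in the loop above:
# candidate scores are flattened over (beam, vocab), the top-k survivors are
# kept, and each winner is traced back to its source beam and appended token
# (toy tensors; names and sizes are illustrative, not the class API):
def _beam_step_sketch(scores: torch.Tensor, logits: torch.Tensor, beam_size: int):
    scores_sum = scores[:, None] + logits      # (beam, vocab) running log-probs
    top_scores, flat_idx = scores_sum.view(-1).topk(beam_size)
    source_beam = flat_idx // logits.shape[1]  # which beam each candidate extends
    next_token = flat_idx % logits.shape[1]    # token appended to that beam
    return top_scores, source_beam, next_token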
| 300
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCAmelCase : Dict = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Any = ['''BridgeTowerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 300
|
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self :Union[str, Any] , *snake_case :Tuple , **snake_case :Any ):
'''simple docstring'''
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , snake_case , )
super().__init__(*snake_case , **snake_case )
| 300
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : Any = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = '''instructblip_vision_model'''
def __init__( self :Union[str, Any] , snake_case :Tuple=1_408 , snake_case :Tuple=6_144 , snake_case :Optional[int]=39 , snake_case :str=16 , snake_case :str=224 , snake_case :List[Any]=14 , snake_case :Tuple="gelu" , snake_case :Optional[int]=1e-6 , snake_case :Union[str, Any]=0.0 , snake_case :List[str]=1e-10 , snake_case :str=True , **snake_case :Any , ):
'''simple docstring'''
super().__init__(**snake_case )
A_ : str = hidden_size
A_ : Dict = intermediate_size
A_ : int = num_hidden_layers
A_ : int = num_attention_heads
A_ : Any = patch_size
A_ : Union[str, Any] = image_size
A_ : Optional[int] = initializer_range
A_ : Any = attention_dropout
A_ : Union[str, Any] = layer_norm_eps
A_ : Any = hidden_act
A_ : str = qkv_bias
@classmethod
def SCREAMING_SNAKE_CASE ( cls :Union[str, Any] , snake_case :Union[str, os.PathLike] , **snake_case :int ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case )
A_ , A_ : List[str] = cls.get_config_dict(snake_case , **snake_case )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
A_ : List[Any] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(snake_case , **snake_case )
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = '''instructblip_qformer'''
def __init__( self :Union[str, Any] , snake_case :Optional[Any]=30_522 , snake_case :str=768 , snake_case :Any=12 , snake_case :Dict=12 , snake_case :Optional[int]=3_072 , snake_case :int="gelu" , snake_case :int=0.1 , snake_case :int=0.1 , snake_case :Optional[Any]=512 , snake_case :Tuple=0.02 , snake_case :Tuple=1e-12 , snake_case :str=0 , snake_case :Union[str, Any]="absolute" , snake_case :str=2 , snake_case :Optional[Any]=1_408 , **snake_case :Dict , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case , **snake_case )
A_ : Dict = vocab_size
A_ : List[str] = hidden_size
A_ : Dict = num_hidden_layers
A_ : int = num_attention_heads
A_ : Optional[Any] = hidden_act
A_ : List[str] = intermediate_size
A_ : Dict = hidden_dropout_prob
A_ : str = attention_probs_dropout_prob
A_ : Tuple = max_position_embeddings
A_ : Any = initializer_range
A_ : Tuple = layer_norm_eps
A_ : Union[str, Any] = position_embedding_type
A_ : Union[str, Any] = cross_attention_frequency
A_ : List[str] = encoder_hidden_size
@classmethod
def SCREAMING_SNAKE_CASE ( cls :str , snake_case :Union[str, os.PathLike] , **snake_case :str ):
'''simple docstring'''
cls._set_token_in_kwargs(snake_case )
A_ , A_ : List[Any] = cls.get_config_dict(snake_case , **snake_case )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
A_ : List[Any] = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(snake_case , **snake_case )
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = '''instructblip'''
__UpperCamelCase = True
def __init__( self :Optional[Any] , snake_case :Optional[int]=None , snake_case :int=None , snake_case :Optional[int]=None , snake_case :Any=32 , **snake_case :List[str] ):
'''simple docstring'''
super().__init__(**snake_case )
if vision_config is None:
A_ : Optional[Any] = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
A_ : Optional[Any] = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
A_ : Optional[int] = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
A_ : Tuple = InstructBlipVisionConfig(**snake_case )
A_ : Any = InstructBlipQFormerConfig(**snake_case )
A_ : Tuple = text_config["model_type"] if "model_type" in text_config else "opt"
A_ : Union[str, Any] = CONFIG_MAPPING[text_model_type](**snake_case )
A_ : Optional[int] = self.text_config.tie_word_embeddings
A_ : Union[str, Any] = self.text_config.is_encoder_decoder
A_ : Dict = num_query_tokens
A_ : Any = self.vision_config.hidden_size
A_ : Optional[int] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
A_ : Optional[Any] = 1.0
A_ : Union[str, Any] = 0.02
@classmethod
def SCREAMING_SNAKE_CASE ( cls :List[Any] , snake_case :InstructBlipVisionConfig , snake_case :InstructBlipQFormerConfig , snake_case :PretrainedConfig , **snake_case :str , ):
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **snake_case , )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : Tuple = copy.deepcopy(self.__dict__ )
A_ : Any = self.vision_config.to_dict()
A_ : Optional[Any] = self.qformer_config.to_dict()
A_ : Optional[int] = self.text_config.to_dict()
A_ : Optional[Any] = self.__class__.model_type
return output
| 300
|
from __future__ import annotations
def __snake_case ( _lowerCAmelCase : list[float] ) -> bool:
if len(_lowerCAmelCase ) < 2:
raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
if any(i <= 0 for i in nums ):
raise ValueError("All values must be greater than 0" )
A_ : List[str] = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
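# Usage examples for the check above: a polygon is valid iff its longest side
# is strictly shorter than the sum of the others (function name as mangled in
# this file):
assert __snake_case([6, 10, 5])         # valid triangle: 10 < 6 + 5
assert not __snake_case([3, 7, 13, 2])  # degenerate: 13 >= 3 + 7 + 2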
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300
| 1
|
from __future__ import annotations
import math
def __snake_case ( _lowerCAmelCase : list , _lowerCAmelCase : list ) -> list:
if len(_lowerCAmelCase ) != 2 or len(a[0] ) != 2 or len(_lowerCAmelCase ) != 2 or len(b[0] ) != 2:
raise Exception("Matrices are not 2x2" )
A_ : Any = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def __snake_case ( _lowerCAmelCase : list , _lowerCAmelCase : list ) -> List[Any]:
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(_lowerCAmelCase ) )
]
def __snake_case ( _lowerCAmelCase : list , _lowerCAmelCase : list ) -> List[str]:
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(_lowerCAmelCase ) )
]
def __snake_case ( _lowerCAmelCase : list ) -> tuple[list, list, list, list]:
if len(_lowerCAmelCase ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception("Odd matrices are not supported!" )
A_ : Tuple = len(_lowerCAmelCase )
A_ : Any = matrix_length // 2
A_ : Union[str, Any] = [[a[i][j] for j in range(_lowerCAmelCase , _lowerCAmelCase )] for i in range(_lowerCAmelCase )]
A_ : Any = [
[a[i][j] for j in range(_lowerCAmelCase , _lowerCAmelCase )] for i in range(_lowerCAmelCase , _lowerCAmelCase )
]
A_ : Dict = [[a[i][j] for j in range(_lowerCAmelCase )] for i in range(_lowerCAmelCase )]
A_ : Optional[int] = [[a[i][j] for j in range(_lowerCAmelCase )] for i in range(_lowerCAmelCase , _lowerCAmelCase )]
return top_left, top_right, bot_left, bot_right
def __snake_case ( _lowerCAmelCase : list ) -> tuple[int, int]:
    return len(_lowerCAmelCase ), len(_lowerCAmelCase[0] )
def __snake_case ( _lowerCAmelCase : list ) -> None:
    print("\n".join(str(line ) for line in _lowerCAmelCase ) )
def __snake_case ( _lowerCAmelCase : list , _lowerCAmelCase : list ) -> list:
if matrix_dimensions(_lowerCAmelCase ) == (2, 2):
return default_matrix_multiplication(_lowerCAmelCase , _lowerCAmelCase )
A_ , A_ , A_ , A_ : int = split_matrix(_lowerCAmelCase )
A_ , A_ , A_ , A_ : Union[str, Any] = split_matrix(_lowerCAmelCase )
A_ : List[str] = actual_strassen(_lowerCAmelCase , matrix_subtraction(_lowerCAmelCase , _lowerCAmelCase ) )
A_ : Optional[Any] = actual_strassen(matrix_addition(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
A_ : Union[str, Any] = actual_strassen(matrix_addition(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
A_ : Optional[Any] = actual_strassen(_lowerCAmelCase , matrix_subtraction(_lowerCAmelCase , _lowerCAmelCase ) )
A_ : Union[str, Any] = actual_strassen(matrix_addition(_lowerCAmelCase , _lowerCAmelCase ) , matrix_addition(_lowerCAmelCase , _lowerCAmelCase ) )
A_ : Optional[Any] = actual_strassen(matrix_subtraction(_lowerCAmelCase , _lowerCAmelCase ) , matrix_addition(_lowerCAmelCase , _lowerCAmelCase ) )
A_ : int = actual_strassen(matrix_subtraction(_lowerCAmelCase , _lowerCAmelCase ) , matrix_addition(_lowerCAmelCase , _lowerCAmelCase ) )
A_ : Optional[Any] = matrix_addition(matrix_subtraction(matrix_addition(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ) , _lowerCAmelCase )
A_ : Union[str, Any] = matrix_addition(_lowerCAmelCase , _lowerCAmelCase )
A_ : str = matrix_addition(_lowerCAmelCase , _lowerCAmelCase )
A_ : Optional[int] = matrix_subtraction(matrix_subtraction(matrix_addition(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ) , _lowerCAmelCase )
# construct the new matrix from our 4 quadrants
A_ : Union[str, Any] = []
for i in range(len(_lowerCAmelCase ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(_lowerCAmelCase ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def __snake_case ( _lowerCAmelCase : list , _lowerCAmelCase : list ) -> list:
if matrix_dimensions(_lowerCAmelCase )[1] != matrix_dimensions(_lowerCAmelCase )[0]:
A_ : List[Any] = (
"Unable to multiply these matrices, please check the dimensions.\n"
f"Matrix A: {matrixa}\n"
f"Matrix B: {matrixa}"
)
raise Exception(_lowerCAmelCase )
A_ : Tuple = matrix_dimensions(_lowerCAmelCase )
A_ : Tuple = matrix_dimensions(_lowerCAmelCase )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
A_ : List[str] = max(*_lowerCAmelCase , *_lowerCAmelCase )
A_ : Optional[Any] = int(math.pow(2 , math.ceil(math.loga(_lowerCAmelCase ) ) ) )
A_ : str = matrixa
A_ : int = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , _lowerCAmelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _lowerCAmelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _lowerCAmelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
A_ : List[Any] = actual_strassen(_lowerCAmelCase , _lowerCAmelCase )
# Removing the additional zeros
for i in range(0 , _lowerCAmelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _lowerCAmelCase ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
_lowerCAmelCase : int = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
_lowerCAmelCase : Tuple = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
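    # Cross-check of the classic seven Strassen products on a 2x2 example
    # (pure Python, independent of the recursive implementation above; the
    # recursion uses an equivalent set of seven sub-products):
    a11, a12, a21, a22 = 1, 2, 3, 4
    b11, b12, b21, b22 = 5, 6, 7, 8
    m1 = (a11 + a22) * (b11 + b22)
    m2 = (a21 + a22) * b11
    m3 = a11 * (b12 - b22)
    m4 = a22 * (b21 - b11)
    m5 = (a11 + a12) * b22
    m6 = (a21 - a11) * (b11 + b12)
    m7 = (a12 - a22) * (b21 + b22)
    c = [[m1 + m4 - m5 + m7, m3 + m5], [m2 + m4, m1 - m2 + m3 + m6]]
    assert c == [[19, 22], [43, 50]]  # [[1,2],[3,4]] @ [[5,6],[7,8]] with 7 multiplies instead of 8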
| 300
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self :Union[str, Any] , snake_case :AutoencoderKL , snake_case :CLIPTextModel , snake_case :CLIPTokenizer , snake_case :UNetaDConditionModel , snake_case :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , snake_case :StableDiffusionSafetyChecker , snake_case :CLIPImageProcessor , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , )
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A_ : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
self.enable_attention_slicing(snake_case )
@torch.no_grad()
def __call__( self :Any , snake_case :Union[str, List[str]] , snake_case :int = 512 , snake_case :int = 512 , snake_case :int = 50 , snake_case :float = 7.5 , snake_case :Optional[Union[str, List[str]]] = None , snake_case :Optional[int] = 1 , snake_case :float = 0.0 , snake_case :Optional[torch.Generator] = None , snake_case :Optional[torch.FloatTensor] = None , snake_case :Optional[str] = "pil" , snake_case :bool = True , snake_case :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case :int = 1 , snake_case :Optional[torch.FloatTensor] = None , **snake_case :Optional[Any] , ):
'''simple docstring'''
if isinstance(snake_case , snake_case ):
A_ : Dict = 1
elif isinstance(snake_case , snake_case ):
A_ : Optional[Any] = len(snake_case )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(snake_case )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case , snake_case ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(snake_case )}." )
# get prompt text embeddings
A_ : int = self.tokenizer(
snake_case , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
A_ : Dict = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A_ : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
A_ : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
A_ : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
A_ , A_ , A_ : int = text_embeddings.shape
A_ : List[str] = text_embeddings.repeat(1 , snake_case , 1 )
A_ : List[str] = text_embeddings.view(bs_embed * num_images_per_prompt , snake_case , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
A_ : Dict = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A_ : List[str]
if negative_prompt is None:
A_ : List[str] = [""]
elif type(snake_case ) is not type(snake_case ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(snake_case )} !="
f" {type(snake_case )}." )
elif isinstance(snake_case , snake_case ):
A_ : Optional[Any] = [negative_prompt]
elif batch_size != len(snake_case ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(snake_case )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
A_ : Any = negative_prompt
A_ : Optional[int] = text_input_ids.shape[-1]
A_ : Dict = self.tokenizer(
snake_case , padding="max_length" , max_length=snake_case , truncation=snake_case , return_tensors="pt" , )
A_ : Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A_ : Tuple = uncond_embeddings.shape[1]
A_ : Dict = uncond_embeddings.repeat(snake_case , snake_case , 1 )
A_ : Dict = uncond_embeddings.view(batch_size * num_images_per_prompt , snake_case , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A_ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
A_ : List[str] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
A_ : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
A_ : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A_ : Tuple = torch.randn(
snake_case , generator=snake_case , device="cpu" , dtype=snake_case ).to(self.device )
A_ : Optional[Any] = torch.randn(snake_case , generator=snake_case , device="cpu" , dtype=snake_case ).to(
self.device )
else:
A_ : int = torch.randn(
snake_case , generator=snake_case , device=self.device , dtype=snake_case )
A_ : Optional[int] = torch.randn(snake_case , generator=snake_case , device=self.device , dtype=snake_case )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
A_ : Tuple = latents_reference.to(self.device )
A_ : Any = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
A_ : List[Any] = (latents_shape[3] - latents_shape_reference[3]) // 2
A_ : Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2
A_ : Optional[int] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
A_ : Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
A_ : Optional[Any] = 0 if dx < 0 else dx
A_ : Optional[Any] = 0 if dy < 0 else dy
A_ : List[str] = max(-dx , 0 )
A_ : List[Any] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
A_ : Any = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(snake_case )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
A_ : str = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A_ : Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A_ : Optional[int] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A_ : List[str] = {}
if accepts_eta:
A_ : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(snake_case ) ):
# expand the latents if we are doing classifier free guidance
A_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A_ : Any = self.scheduler.scale_model_input(snake_case , snake_case )
# predict the noise residual
A_ : List[str] = self.unet(snake_case , snake_case , encoder_hidden_states=snake_case ).sample
# perform guidance
if do_classifier_free_guidance:
A_ , A_ : Dict = noise_pred.chunk(2 )
A_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A_ : Tuple = self.scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case , snake_case , snake_case )
A_ : List[str] = 1 / 0.18215 * latents
A_ : Tuple = self.vae.decode(snake_case ).sample
A_ : Dict = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
A_ : int = self.feature_extractor(self.numpy_to_pil(snake_case ) , return_tensors="pt" ).to(
self.device )
A_ , A_ : List[str] = self.safety_checker(
images=snake_case , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
A_ : List[str] = None
if output_type == "pil":
A_ : Optional[int] = self.numpy_to_pil(snake_case )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=snake_case , nsfw_content_detected=snake_case )
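# The guidance step inside the denoising loop above is plain classifier-free
# guidance; a one-line standalone sketch with toy tensors (shapes and the
# scale value are illustrative, not pipeline defaults):
_uncond = torch.randn(1, 4, 64, 64)  # noise prediction for the empty prompt
_text = torch.randn(1, 4, 64, 64)    # noise prediction for the real prompt
_guided = _uncond + 7.5 * (_text - _uncond)  # scale 1.0 recovers _text; larger scales push further from _uncond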
| 300
| 1
|
from __future__ import annotations
from typing import Any
class __magic_name__ :
"""simple docstring"""
def __init__( self :List[Any] , snake_case :int , snake_case :int , snake_case :float = 0 ):
'''simple docstring'''
A_ , A_ : Any = row, column
A_ : Union[str, Any] = [[default_value for c in range(snake_case )] for r in range(snake_case )]
def __str__( self :int ):
'''simple docstring'''
A_ : str = f"Matrix consist of {self.row} rows and {self.column} columns\n"
# Make string identifier
A_ : Any = 0
for row_vector in self.array:
for obj in row_vector:
A_ : Tuple = max(snake_case , len(str(snake_case ) ) )
A_ : Optional[int] = f"%{max_element_length}s"
# Make string and return
def single_line(snake_case :list[float] ) -> str:
nonlocal string_format_identifier
A_ : Any = "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(snake_case ) for row_vector in self.array )
return s
def __repr__( self :List[Any] ):
'''simple docstring'''
return str(self )
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :tuple[int, int] ):
'''simple docstring'''
if not (isinstance(snake_case , (list, tuple) ) and len(snake_case ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self :Dict , snake_case :tuple[int, int] ):
'''simple docstring'''
assert self.validate_indicies(snake_case )
return self.array[loc[0]][loc[1]]
def __setitem__( self :Optional[Any] , snake_case :tuple[int, int] , snake_case :float ):
'''simple docstring'''
assert self.validate_indicies(snake_case )
A_ : Union[str, Any] = value
def __add__( self :List[str] , snake_case :Matrix ):
'''simple docstring'''
assert isinstance(snake_case , snake_case )
assert self.row == another.row and self.column == another.column
# Add
A_ : Any = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
A_ : Optional[Any] = self[r, c] + another[r, c]
return result
def __neg__( self :Any ):
'''simple docstring'''
A_ : List[Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
A_ : Optional[int] = -self[r, c]
return result
def __sub__( self :Any , snake_case :Matrix ):
'''simple docstring'''
return self + (-another)
def __mul__( self :List[Any] , snake_case :int | float | Matrix ):
'''simple docstring'''
if isinstance(snake_case , (int, float) ): # Scalar multiplication
A_ : Optional[int] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
A_ : str = self[r, c] * another
return result
elif isinstance(snake_case , snake_case ): # Matrix multiplication
assert self.column == another.row
A_ : Dict = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
A_ : Any = f"Unsupported type given for another ({type(snake_case )})"
raise TypeError(snake_case )
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
A_ : Any = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
A_ : int = self[r, c]
return result
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :Matrix , snake_case :Matrix ):
'''simple docstring'''
assert isinstance(snake_case , snake_case ) and isinstance(snake_case , snake_case )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
A_ : Optional[Any] = v.transpose()
A_ : Any = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
            return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def __snake_case ( ) -> None:
# a^(-1)
A_ : Optional[Any] = Matrix(3 , 3 , 0 )
for i in range(3 ):
A_ : List[Any] = 1
print(f"a^(-1) is {ainv}" )
# u, v
A_ : Optional[Any] = Matrix(3 , 1 , 0 )
A_ , A_ , A_ : Union[str, Any] = 1, 2, -3
A_ : Tuple = Matrix(3 , 1 , 0 )
A_ , A_ , A_ : Any = 4, -2, 5
print(f"u is {u}" )
print(f"v is {v}" )
print(f"uv^T is {u * v.transpose()}" )
# Sherman Morrison
print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(_lowerCAmelCase , _lowerCAmelCase )}" )
def __snake_case ( ) -> None:
import doctest
doctest.testmod()
testa()
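    # Numerical cross-check of the Sherman-Morrison identity the method above
    # implements, where the Matrix instance plays the role of A^(-1):
    # (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u).
    # numpy is used only for this sketch; the values are illustrative.
    import numpy as np

    rng = np.random.default_rng(0)
    big_a = np.eye(3) + 0.1 * rng.standard_normal((3, 3))
    u_vec = rng.standard_normal((3, 1))
    v_vec = rng.standard_normal((3, 1))
    a_inv = np.linalg.inv(big_a)
    lhs = np.linalg.inv(big_a + u_vec @ v_vec.T)
    rhs = a_inv - (a_inv @ u_vec @ v_vec.T @ a_inv) / (1.0 + (v_vec.T @ a_inv @ u_vec).item())
    assert np.allclose(lhs, rhs)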
| 300
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Any ) -> Dict:
A_ : Optional[Any] = nn.functional.normalize(_lowerCAmelCase )
A_ : List[str] = nn.functional.normalize(_lowerCAmelCase )
return torch.mm(_lowerCAmelCase , normalized_text_embeds.t() )
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = CLIPConfig
__UpperCamelCase = ['''CLIPEncoderLayer''']
def __init__( self :int , snake_case :CLIPConfig ):
'''simple docstring'''
super().__init__(snake_case )
A_ : int = CLIPVisionModel(config.vision_config )
A_ : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=snake_case )
A_ : Tuple = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=snake_case )
A_ : str = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=snake_case )
A_ : List[str] = nn.Parameter(torch.ones(17 ) , requires_grad=snake_case )
A_ : int = nn.Parameter(torch.ones(3 ) , requires_grad=snake_case )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Dict , snake_case :Any ):
'''simple docstring'''
A_ : List[Any] = self.vision_model(snake_case )[1] # pooled_output
A_ : List[Any] = self.visual_projection(snake_case )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : Optional[Any] = cosine_distance(snake_case , self.special_care_embeds ).cpu().float().numpy()
A_ : Tuple = cosine_distance(snake_case , self.concept_embeds ).cpu().float().numpy()
A_ : Union[str, Any] = []
A_ : Any = image_embeds.shape[0]
for i in range(snake_case ):
A_ : Optional[int] = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A_ : Optional[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
A_ : Optional[Any] = special_cos_dist[i][concept_idx]
A_ : Tuple = self.special_care_embeds_weights[concept_idx].item()
A_ : Union[str, Any] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]} )
A_ : Any = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
A_ : Tuple = cos_dist[i][concept_idx]
A_ : Tuple = self.concept_embeds_weights[concept_idx].item()
A_ : Tuple = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(snake_case )
result.append(snake_case )
A_ : Any = [len(res["bad_concepts"] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor ):
'''simple docstring'''
A_ : List[str] = self.vision_model(snake_case )[1] # pooled_output
A_ : int = self.visual_projection(snake_case )
A_ : Tuple = cosine_distance(snake_case , self.special_care_embeds )
A_ : Tuple = cosine_distance(snake_case , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A_ : Optional[Any] = 0.0
A_ : Tuple = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
A_ : Optional[Any] = torch.any(special_scores > 0 , dim=1 )
A_ : Optional[Any] = special_care * 0.01
A_ : Optional[int] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
A_ : Union[str, Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
A_ : Union[str, Any] = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
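# Note that the `cosine_distance` helper above actually returns a cosine
# *similarity* matrix (normalized dot products); a self-contained torch sketch
# of the same computation (input sizes are illustrative):
def _cosine_similarity_matrix(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    a = nn.functional.normalize(a, dim=-1)
    b = nn.functional.normalize(b, dim=-1)
    return a @ b.t()  # (num_a, num_b): one score per embedding pair
_img = torch.randn(2, 8)  # e.g. 2 image embeddings
_txt = torch.randn(5, 8)  # e.g. 5 concept embeddings
assert _cosine_similarity_matrix(_img, _txt).shape == (2, 5)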
| 300
| 1
|
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
}
_lowerCAmelCase : str = {
'''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''},
'''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''},
}
_lowerCAmelCase : List[Any] = {
'''ctrl''': 256,
}
_lowerCAmelCase : Optional[Any] = {
'''Pregnancy''': 168_629,
'''Christianity''': 7_675,
'''Explain''': 106_423,
'''Fitness''': 63_440,
'''Saving''': 63_163,
'''Ask''': 27_171,
'''Ass''': 95_985,
'''Joke''': 163_509,
'''Questions''': 45_622,
'''Thoughts''': 49_605,
'''Retail''': 52_342,
'''Feminism''': 164_338,
'''Writing''': 11_992,
'''Atheism''': 192_263,
'''Netflix''': 48_616,
'''Computing''': 39_639,
'''Opinion''': 43_213,
'''Alone''': 44_967,
'''Funny''': 58_917,
'''Gaming''': 40_358,
'''Human''': 4_088,
'''India''': 1_331,
'''Joker''': 77_138,
'''Diet''': 36_206,
'''Legal''': 11_859,
'''Norman''': 4_939,
'''Tip''': 72_689,
'''Weight''': 52_343,
'''Movies''': 46_273,
'''Running''': 23_425,
'''Science''': 2_090,
'''Horror''': 37_793,
'''Confession''': 60_572,
'''Finance''': 12_250,
'''Politics''': 16_360,
'''Scary''': 191_985,
'''Support''': 12_654,
'''Technologies''': 32_516,
'''Teenage''': 66_160,
'''Event''': 32_769,
'''Learned''': 67_460,
'''Notion''': 182_770,
'''Wikipedia''': 37_583,
'''Books''': 6_665,
'''Extract''': 76_050,
'''Confessions''': 102_701,
'''Conspiracy''': 75_932,
'''Links''': 63_674,
'''Narcissus''': 150_425,
'''Relationship''': 54_766,
'''Relationships''': 134_796,
'''Reviews''': 41_671,
'''News''': 4_256,
'''Translation''': 26_820,
'''multilingual''': 128_406,
}
def __snake_case ( _lowerCAmelCase : int ) -> str:
A_ : List[Any] = set()
A_ : Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A_ : Tuple = char
A_ : List[str] = set(_lowerCAmelCase )
return pairs
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = CONTROL_CODES
def __init__( self :List[str] , snake_case :str , snake_case :int , snake_case :List[Any]="<unk>" , **snake_case :Union[str, Any] ):
'''simple docstring'''
super().__init__(unk_token=snake_case , **snake_case )
with open(snake_case , encoding="utf-8" ) as vocab_handle:
A_ : Any = json.load(snake_case )
A_ : Tuple = {v: k for k, v in self.encoder.items()}
with open(snake_case , encoding="utf-8" ) as merges_handle:
A_ : Tuple = merges_handle.read().split("\n" )[1:-1]
A_ : Tuple = [tuple(merge.split() ) for merge in merges]
A_ : int = dict(zip(snake_case , range(len(snake_case ) ) ) )
A_ : List[Any] = {}
@property
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
return len(self.encoder )
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :List[str] ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A_ : Optional[Any] = tuple(snake_case )
A_ : Union[str, Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
A_ : Dict = get_pairs(snake_case )
if not pairs:
return token
while True:
A_ : Union[str, Any] = min(snake_case , key=lambda snake_case : self.bpe_ranks.get(snake_case , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
A_ , A_ : Tuple = bigram
A_ : int = []
A_ : List[Any] = 0
while i < len(snake_case ):
try:
A_ : Optional[Any] = word.index(snake_case , snake_case )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A_ : List[str] = j
if word[i] == first and i < len(snake_case ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A_ : Union[str, Any] = tuple(snake_case )
A_ : Union[str, Any] = new_word
if len(snake_case ) == 1:
break
else:
A_ : Any = get_pairs(snake_case )
A_ : Tuple = "@@ ".join(snake_case )
A_ : Any = word[:-4]
A_ : List[str] = word
return word
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Dict ):
'''simple docstring'''
A_ : Union[str, Any] = []
A_ : Tuple = re.findall(R"\S+\n?" , snake_case )
for token in words:
split_tokens.extend(list(self.bpe(snake_case ).split(" " ) ) )
return split_tokens
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :Optional[Any] ):
'''simple docstring'''
return self.encoder.get(snake_case , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :Optional[int] ):
'''simple docstring'''
return self.decoder.get(snake_case , self.unk_token )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :int ):
'''simple docstring'''
A_ : Optional[Any] = " ".join(snake_case ).replace("@@ " , "" ).strip()
return out_string
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :str , snake_case :Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(snake_case ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
A_ : Tuple = os.path.join(
snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
A_ : Optional[int] = os.path.join(
snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(snake_case , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case , ensure_ascii=snake_case ) + "\n" )
A_ : List[str] = 0
with open(snake_case , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
A_ : Union[str, Any] = token_index
writer.write(" ".join(snake_case ) + "\n" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
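# A self-contained sketch of the merge loop the `bpe` method above implements,
# with a toy merge table (real ranks come from merges.txt; the function name
# and example ranks here are illustrative):
def _bpe_sketch(word, ranks):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while len(symbols) > 1:
        # pick the adjacent pair with the lowest merge rank
        best = min(set(zip(symbols, symbols[1:])), key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break
        first, second = best
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(first + second)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols
# _bpe_sketch("lower", {("l", "o"): 0, ("lo", "w"): 1}) -> ['low', 'e', 'r</w>']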
| 300
|
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
def __snake_case ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
A_ : Tuple = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> Dict:
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
A_ : str = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight" )
A_ : List[Any] = in_proj_weight[
: encoder_config.hidden_size, :
]
A_ : Optional[Any] = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
A_ : Optional[Any] = in_proj_weight[
-encoder_config.hidden_size :, :
]
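# Hypothetical toy illustration (not part of the conversion) of the slicing above: the
# fused (3 * hidden, hidden) qkv projection is cut into equal query/key/value blocks.
def _qkv_split_sketch(hidden_size: int = 4):
    qkv = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    q = qkv[:hidden_size, :]
    k = qkv[hidden_size : hidden_size * 2, :]
    v = qkv[-hidden_size:, :]
    assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)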
def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict ) -> Any:
A_ : Dict = dct.pop(_lowerCAmelCase )
A_ : List[Any] = val
def __snake_case ( _lowerCAmelCase : List[str] ) -> int:
if "handwritten" in checkpoint_url:
A_ : Any = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ : Any = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    A_ : List[Any] = Image.open(requests.get(_lowerCAmelCase , stream=True ).raw ).convert("RGB" )
return im
@torch.no_grad()
def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> List[Any]:
A_ : Optional[Any] = ViTConfig(image_size=384 , qkv_bias=_lowerCAmelCase )
A_ : Tuple = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
A_ : Tuple = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
A_ : Optional[Any] = 1024
A_ : Union[str, Any] = 4096
A_ : Union[str, Any] = 24
A_ : List[Any] = 16
A_ : List[str] = 1024
else:
raise ValueError("Should either find 'base' or 'large' in checkpoint URL" )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings and no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ : Dict = False
A_ : int = "relu"
A_ : Optional[int] = 1024
A_ : Any = True
A_ : List[Any] = False
A_ : Optional[int] = False
# load HuggingFace model
A_ : Union[str, Any] = ViTModel(_lowerCAmelCase , add_pooling_layer=_lowerCAmelCase )
A_ : str = TrOCRForCausalLM(_lowerCAmelCase )
A_ : List[str] = VisionEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
model.eval()
# load state_dict of original model, rename some keys
A_ : Optional[int] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="cpu" , check_hash=_lowerCAmelCase )["model"]
A_ : Dict = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
A_ : Dict = state_dict.pop(_lowerCAmelCase )
if key.startswith("decoder" ) and "output_projection" not in key:
A_ : List[str] = val
else:
A_ : Optional[Any] = val
# load state dict
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image
A_ : List[Any] = ViTImageProcessor(size=encoder_config.image_size )
A_ : Any = RobertaTokenizer.from_pretrained("roberta-large" )
A_ : Union[str, Any] = TrOCRProcessor(_lowerCAmelCase , _lowerCAmelCase )
A_ : List[str] = processor(images=prepare_img(_lowerCAmelCase ) , return_tensors="pt" ).pixel_values
# verify logits
A_ : Union[str, Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
A_ : Optional[int] = model(pixel_values=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase )
A_ : Tuple = outputs.logits
A_ : Union[str, Any] = torch.Size([1, 1, 50265] )
if "trocr-base-handwritten" in checkpoint_url:
A_ : Union[str, Any] = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
A_ : str = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
A_ : Optional[Any] = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
A_ : Optional[int] = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , _lowerCAmelCase , atol=1e-3 ), "First elements of logits not as expected"
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCAmelCase )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_lowerCAmelCase : List[str] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 300
| 1
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCAmelCase : int = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_lowerCAmelCase : Tuple = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_lowerCAmelCase : int = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[List[List[str]]] , snake_case :List[List[str]] , snake_case :int = 1 , snake_case :int = 4 , ):
'''simple docstring'''
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=snake_case , hypotheses=snake_case , min_len=snake_case , max_len=snake_case )
}
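# Hypothetical sentence-level sketch (not part of this metric, which delegates to nltk's
# corpus_gleu): per the description above, count clipped 1..4-gram matches, then take
# min(n-gram recall, n-gram precision).
def _gleu_sentence_sketch(hypothesis, reference, min_len=1, max_len=4):
    from collections import Counter

    def ngram_counts(tokens):
        return Counter(
            tuple(tokens[i : i + n])
            for n in range(min_len, max_len + 1)
            for i in range(len(tokens) - n + 1)
        )

    hyp_counts, ref_counts = ngram_counts(hypothesis), ngram_counts(reference)
    matches = sum((hyp_counts & ref_counts).values())
    recall = matches / max(sum(ref_counts.values()), 1)
    precision = matches / max(sum(hyp_counts.values()), 1)
    return min(recall, precision)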
| 300
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = 42
__UpperCamelCase = 42
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = 1
@register_to_config
def __init__( self :Union[str, Any] , snake_case :int = 2_000 , snake_case :float = 0.15 , snake_case :float = 0.01 , snake_case :float = 1348.0 , snake_case :float = 1e-5 , snake_case :int = 1 , ):
'''simple docstring'''
A_ : Dict = sigma_max
# setable values
A_ : List[Any] = None
self.set_sigmas(snake_case , snake_case , snake_case , snake_case )
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :torch.FloatTensor , snake_case :Optional[int] = None ):
'''simple docstring'''
return sample
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :int , snake_case :float = None , snake_case :Union[str, torch.device] = None ):
'''simple docstring'''
A_ : Optional[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
A_ : Tuple = torch.linspace(1 , snake_case , snake_case , device=snake_case )
def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :int , snake_case :float = None , snake_case :float = None , snake_case :float = None ):
'''simple docstring'''
A_ : Union[str, Any] = sigma_min if sigma_min is not None else self.config.sigma_min
A_ : Any = sigma_max if sigma_max is not None else self.config.sigma_max
A_ : Dict = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(snake_case , snake_case )
A_ : str = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
A_ : Any = torch.exp(torch.linspace(math.log(snake_case ) , math.log(snake_case ) , snake_case ) )
A_ : str = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :List[str] , snake_case :Dict ):
'''simple docstring'''
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :torch.FloatTensor , snake_case :int , snake_case :torch.FloatTensor , snake_case :Optional[torch.Generator] = None , snake_case :bool = True , ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
A_ : int = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
A_ : Optional[Any] = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
A_ : Dict = timesteps.to(self.discrete_sigmas.device )
A_ : Optional[int] = self.discrete_sigmas[timesteps].to(sample.device )
A_ : int = self.get_adjacent_sigma(snake_case , snake_case ).to(sample.device )
A_ : Union[str, Any] = torch.zeros_like(snake_case )
A_ : Tuple = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
A_ : Optional[int] = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
A_ : Tuple = diffusion.unsqueeze(-1 )
A_ : Optional[Any] = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
A_ : List[Any] = randn_tensor(
sample.shape , layout=sample.layout , generator=snake_case , device=sample.device , dtype=sample.dtype )
A_ : List[Any] = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
A_ : Any = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=snake_case , prev_sample_mean=snake_case )
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , snake_case :Optional[torch.Generator] = None , snake_case :bool = True , ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
A_ : Dict = randn_tensor(sample.shape , layout=sample.layout , generator=snake_case ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
A_ : int = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
A_ : List[Any] = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
A_ : Dict = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
A_ : Dict = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
A_ : int = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
A_ : str = step_size.unsqueeze(-1 )
A_ : Optional[Any] = sample + step_size * model_output
A_ : Tuple = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case )
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , ):
'''simple docstring'''
A_ : Union[str, Any] = timesteps.to(original_samples.device )
A_ : List[Any] = self.discrete_sigmas.to(original_samples.device )[timesteps]
A_ : List[Any] = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(snake_case ) * sigmas[:, None, None, None]
)
A_ : Optional[int] = noise + original_samples
return noisy_samples
def __len__( self :Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
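# Hypothetical distilled form (not part of this scheduler) of the predictor update in
# step_pred above: with g_t**2 = sigma_t**2 - sigma_{t-1}**2, the reverse VE-SDE step is
# x_{t-1} = x_t + g_t**2 * score + g_t * z with z ~ N(0, I).
def _ve_predictor_sketch(sample, score, sigma_t, sigma_prev, generator=None):
    g_squared = sigma_t**2 - sigma_prev**2
    mean = sample + g_squared * score  # drift term driven by the learned score
    noise = randn_tensor(sample.shape, generator=generator, device=sample.device, dtype=sample.dtype)
    return mean + g_squared**0.5 * noise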
| 300
| 1
|
from math import factorial
def __snake_case ( _lowerCAmelCase : int = 100 ) -> int:
    return sum(int(x ) for x in str(factorial(_lowerCAmelCase ) ) )
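# Hypothetical sanity check of the digit-sum approach above:
# 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
assert sum(int(digit) for digit in str(factorial(10))) == 27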
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 300
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : float | Decimal , _lowerCAmelCase : float = 10**-10 ) -> float:
A_ : Dict = a
while True:
A_ : Union[str, Any] = Decimal(_lowerCAmelCase ) - (
Decimal(eval(_lowerCAmelCase ) ) / Decimal(eval(str(diff(_lowerCAmelCase ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(_lowerCAmelCase ) ) < precision: # noqa: S307
return float(_lowerCAmelCase )
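# Hypothetical eval-free variant (not part of this module) of the update implemented
# above: x_{n+1} = x_n - f(x_n) / f'(x_n), stopping once |f(x)| drops below the precision.
def _newton_raphson_sketch(f, f_prime, start: float, precision: float = 10**-10) -> float:
    x = start
    while abs(f(x)) >= precision:
        x = x - f(x) / f_prime(x)
    return x
# e.g. _newton_raphson_sketch(lambda x: x**2 - 5, lambda x: 2 * x, 2.0) converges to sqrt(5)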
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
    # Find value of e (the root of log(x) - 1 = 0)
print(F'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
| 300
| 1
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self :Union[str, Any] , snake_case :Distribution , snake_case :int=None , snake_case :str=None , snake_case :Dict=0 ):
'''simple docstring'''
A_ : Dict = 1.0 if scale is None else scale
A_ : List[str] = 0.0 if loc is None else loc
super().__init__(snake_case , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=snake_case )] )
@property
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
return self.base_dist.mean * self.scale + self.loc
@property
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
return self.base_dist.variance * self.scale**2
@property
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
return self.variance.sqrt()
class __magic_name__ ( nn.Module ):
"""simple docstring"""
def __init__( self :Optional[int] , snake_case :int , snake_case :Dict[str, int] , snake_case :Callable[..., Tuple[torch.Tensor]] , **snake_case :int ):
'''simple docstring'''
super().__init__(**snake_case )
A_ : int = args_dim
A_ : List[Any] = nn.ModuleList([nn.Linear(snake_case , snake_case ) for dim in args_dim.values()] )
A_ : Optional[Any] = domain_map
def SCREAMING_SNAKE_CASE ( self :str , snake_case :torch.Tensor ):
'''simple docstring'''
A_ : Tuple = [proj(snake_case ) for proj in self.proj]
return self.domain_map(*snake_case )
class __magic_name__ ( nn.Module ):
"""simple docstring"""
def __init__( self :Optional[Any] , snake_case :Optional[Any] ):
'''simple docstring'''
super().__init__()
A_ : Optional[Any] = function
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :List[str] , *snake_case :str ):
'''simple docstring'''
return self.function(snake_case , *snake_case )
class __magic_name__ :
"""simple docstring"""
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
def __init__( self :Dict , snake_case :int = 1 ):
'''simple docstring'''
A_ : Union[str, Any] = dim
A_ : Tuple = {k: dim * self.args_dim[k] for k in self.args_dim}
def SCREAMING_SNAKE_CASE ( self :int , snake_case :Optional[Any] ):
'''simple docstring'''
if self.dim == 1:
return self.distribution_class(*snake_case )
else:
return Independent(self.distribution_class(*snake_case ) , 1 )
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :List[Any] , snake_case :Optional[torch.Tensor] = None , snake_case :Optional[torch.Tensor] = None , ):
'''simple docstring'''
A_ : Optional[int] = self._base_distribution(snake_case )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(snake_case , loc=snake_case , scale=snake_case , event_dim=self.event_dim )
@property
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
return () if self.dim == 1 else (self.dim,)
@property
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
return len(self.event_shape )
@property
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
return 0.0
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :int ):
'''simple docstring'''
return ParameterProjection(
in_features=snake_case , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def SCREAMING_SNAKE_CASE ( self :Tuple , *snake_case :torch.Tensor ):
'''simple docstring'''
raise NotImplementedError()
@staticmethod
def SCREAMING_SNAKE_CASE ( snake_case :torch.Tensor ):
'''simple docstring'''
return (x + torch.sqrt(torch.square(snake_case ) + 4.0 )) / 2.0
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = {"df": 1, "loc": 1, "scale": 1}
__UpperCamelCase = StudentT
@classmethod
def SCREAMING_SNAKE_CASE ( cls :Tuple , snake_case :torch.Tensor , snake_case :torch.Tensor , snake_case :torch.Tensor ):
'''simple docstring'''
A_ : Dict = cls.squareplus(snake_case ).clamp_min(torch.finfo(scale.dtype ).eps )
A_ : Optional[int] = 2.0 + cls.squareplus(snake_case )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = {"loc": 1, "scale": 1}
__UpperCamelCase = Normal
@classmethod
def SCREAMING_SNAKE_CASE ( cls :Any , snake_case :torch.Tensor , snake_case :torch.Tensor ):
'''simple docstring'''
A_ : Tuple = cls.squareplus(snake_case ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = {"total_count": 1, "logits": 1}
__UpperCamelCase = NegativeBinomial
@classmethod
def SCREAMING_SNAKE_CASE ( cls :Optional[Any] , snake_case :torch.Tensor , snake_case :torch.Tensor ):
'''simple docstring'''
A_ : Any = cls.squareplus(snake_case )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def SCREAMING_SNAKE_CASE ( self :int , snake_case :Union[str, Any] ):
'''simple docstring'''
A_ , A_ : Any = distr_args
if self.dim == 1:
return self.distribution_class(total_count=snake_case , logits=snake_case )
else:
return Independent(self.distribution_class(total_count=snake_case , logits=snake_case ) , 1 )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :int , snake_case :Optional[torch.Tensor] = None , snake_case :Optional[torch.Tensor] = None ):
'''simple docstring'''
A_ , A_ : Optional[Any] = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
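# Hypothetical numeric check (not part of this module) of the squareplus map used by the
# heads above: (x + sqrt(x**2 + 4)) / 2 is smooth and strictly positive, which is why it
# is applied to scale/df/total_count parameters before building distributions.
def _squareplus_sketch():
    x = torch.tensor([-3.0, 0.0, 3.0])
    squareplus = (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
    assert bool((squareplus > 0).all())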
| 300
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_lowerCAmelCase : List[Any] = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_lowerCAmelCase : Union[str, Any] = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_lowerCAmelCase : Optional[Any] = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation from sentences before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Optional[int] , snake_case :List[Any] , snake_case :bool = False , snake_case :bool = False , snake_case :bool = False , snake_case :bool = False , ):
'''simple docstring'''
A_ : List[str] = len(references[0] )
if any(len(snake_case ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
A_ : int = [[refs[i] for refs in references] for i in range(snake_case )]
A_ : Optional[Any] = TER(
normalized=snake_case , no_punct=snake_case , asian_support=snake_case , case_sensitive=snake_case , )
A_ : List[Any] = sb_ter.corpus_score(snake_case , snake_case )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 300
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = '''visual_bert'''
def __init__( self :Any , snake_case :List[Any]=30_522 , snake_case :Tuple=768 , snake_case :Union[str, Any]=512 , snake_case :Optional[Any]=12 , snake_case :Any=12 , snake_case :Tuple=3_072 , snake_case :List[Any]="gelu" , snake_case :Optional[Any]=0.1 , snake_case :int=0.1 , snake_case :str=512 , snake_case :List[Any]=2 , snake_case :Union[str, Any]=0.02 , snake_case :Dict=1e-12 , snake_case :Union[str, Any]=False , snake_case :Optional[Any]=True , snake_case :List[str]=1 , snake_case :Any=0 , snake_case :int=2 , **snake_case :Optional[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , **snake_case )
A_ : Dict = vocab_size
A_ : Optional[Any] = max_position_embeddings
A_ : Union[str, Any] = hidden_size
A_ : Any = visual_embedding_dim
A_ : Union[str, Any] = num_hidden_layers
A_ : Dict = num_attention_heads
A_ : str = intermediate_size
A_ : Union[str, Any] = hidden_act
A_ : Tuple = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : str = initializer_range
A_ : str = type_vocab_size
A_ : Any = layer_norm_eps
A_ : List[str] = bypass_transformer
A_ : str = special_visual_initialize
| 300
|
def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : int ) -> str:
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any]=0 ) -> Any:
return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x[column] )
def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : Any=float("inf" ) ) -> int:
for i in range(points_counts - 1 ):
for j in range(i + 1 , _lowerCAmelCase ):
A_ : Tuple = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
A_ : Union[str, Any] = current_dis
return min_dis
def __snake_case ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str]=float("inf" ) ) -> Dict:
for i in range(min(6 , points_counts - 1 ) , _lowerCAmelCase ):
for j in range(max(0 , i - 6 ) , _lowerCAmelCase ):
A_ : List[Any] = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
A_ : Union[str, Any] = current_dis
return min_dis
def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Dict ) -> List[str]:
# base case
if points_counts <= 3:
return dis_between_closest_pair(_lowerCAmelCase , _lowerCAmelCase )
# recursion
A_ : Optional[int] = points_counts // 2
A_ : List[Any] = closest_pair_of_points_sqr(
_lowerCAmelCase , points_sorted_on_y[:mid] , _lowerCAmelCase )
A_ : List[Any] = closest_pair_of_points_sqr(
_lowerCAmelCase , points_sorted_on_y[mid:] , points_counts - mid )
A_ : Tuple = min(_lowerCAmelCase , _lowerCAmelCase )
A_ : Dict = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(_lowerCAmelCase )
A_ : Tuple = dis_between_closest_in_strip(
_lowerCAmelCase , len(_lowerCAmelCase ) , _lowerCAmelCase )
return min(_lowerCAmelCase , _lowerCAmelCase )
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ) -> Any:
A_ : Optional[Any] = column_based_sort(_lowerCAmelCase , column=0 )
A_ : Optional[int] = column_based_sort(_lowerCAmelCase , column=1 )
return (
closest_pair_of_points_sqr(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
) ** 0.5
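# Hypothetical O(n^2) cross-check (not part of this module) for the divide-and-conquer
# result above; handy for validating small inputs.
def _closest_pair_brute_force(points):
    from itertools import combinations

    return min(
        ((pa[0] - pb[0]) ** 2 + (pa[1] - pb[1]) ** 2) ** 0.5
        for pa, pb in combinations(points, 2)
    )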
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('''Distance:''', closest_pair_of_points(points, len(points)))
| 300
| 1
|
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
_lowerCAmelCase : str = False
_lowerCAmelCase : Union[str, Any] = True
_lowerCAmelCase : str = False
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--repo_path''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
_lowerCAmelCase : Any = parser.parse_args()
_lowerCAmelCase : Dict = {
'''image_size''': '''sample_size''',
'''num_res_blocks''': '''layers_per_block''',
'''block_channels''': '''block_out_channels''',
'''down_blocks''': '''down_block_types''',
'''up_blocks''': '''up_block_types''',
'''downscale_freq_shift''': '''freq_shift''',
'''resnet_num_groups''': '''norm_num_groups''',
'''resnet_act_fn''': '''act_fn''',
'''resnet_eps''': '''norm_eps''',
'''num_head_channels''': '''attention_head_dim''',
}
_lowerCAmelCase : List[Any] = {
'''time_steps''': '''time_proj''',
'''mid''': '''mid_block''',
'''downsample_blocks''': '''down_blocks''',
'''upsample_blocks''': '''up_blocks''',
}
_lowerCAmelCase : Optional[Any] = '''''' if has_file(args.repo_path, '''config.json''') else '''unet'''
with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
_lowerCAmelCase : List[str] = reader.read()
_lowerCAmelCase : Union[str, Any] = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, '''config.json'''):
_lowerCAmelCase : str = UNetaDModel(**config)
else:
_lowerCAmelCase : List[str] = UNetaDConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNetaDModel
_lowerCAmelCase : List[str] = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
_lowerCAmelCase : Optional[Any] = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
_lowerCAmelCase : Union[str, Any] = config[key]
del config[key]
_lowerCAmelCase : int = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']]
_lowerCAmelCase : Tuple = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']]
if do_only_weights:
_lowerCAmelCase : Optional[Any] = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
_lowerCAmelCase : str = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
continue
_lowerCAmelCase : str = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('''.''')[0] == key:
_lowerCAmelCase : Union[str, Any] = param_value
_lowerCAmelCase : List[str] = True
if not has_changed:
_lowerCAmelCase : Any = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 300
|
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __magic_name__ :
"""simple docstring"""
def __init__( self :Dict , snake_case :Optional[int] , snake_case :Tuple=13 , snake_case :List[Any]=30 , snake_case :Union[str, Any]=2 , snake_case :List[Any]=3 , snake_case :Tuple=True , snake_case :Dict=True , snake_case :Dict=32 , snake_case :List[str]=5 , snake_case :Optional[Any]=4 , snake_case :Any=37 , snake_case :Dict="gelu" , snake_case :List[str]=0.1 , snake_case :str=0.1 , snake_case :Tuple=10 , snake_case :str=0.02 , snake_case :Optional[Any]=None , ):
'''simple docstring'''
A_ : Tuple = parent
A_ : int = batch_size
A_ : List[str] = image_size
A_ : List[Any] = patch_size
A_ : Optional[Any] = num_channels
A_ : List[Any] = is_training
A_ : Tuple = use_labels
A_ : Union[str, Any] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Any = num_attention_heads
A_ : List[str] = intermediate_size
A_ : Optional[int] = hidden_act
A_ : List[str] = hidden_dropout_prob
A_ : str = attention_probs_dropout_prob
A_ : Any = type_sequence_label_size
A_ : List[str] = initializer_range
A_ : Dict = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A_ : Optional[int] = (image_size // patch_size) ** 2
A_ : List[str] = num_patches + 1
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Tuple = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Dict = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :List[Any] , snake_case :str , snake_case :Tuple ):
'''simple docstring'''
A_ : Optional[Any] = ViTMSNModel(config=snake_case )
model.to(snake_case )
model.eval()
A_ : int = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self :int , snake_case :Optional[int] , snake_case :List[str] , snake_case :List[str] ):
'''simple docstring'''
A_ : Dict = self.type_sequence_label_size
A_ : Tuple = ViTMSNForImageClassification(snake_case )
model.to(snake_case )
model.eval()
A_ : Union[str, Any] = model(snake_case , labels=snake_case )
print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" )
print("Labels: {labels}" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ : Union[str, Any] = 1
A_ : int = ViTMSNForImageClassification(snake_case )
model.to(snake_case )
model.eval()
A_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : Optional[Any] = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : List[str] = self.prepare_config_and_inputs()
A_ , A_ , A_ : Optional[int] = config_and_inputs
A_ : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__UpperCamelCase = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = ViTMSNModelTester(self )
A_ : str = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ , A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[int] = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ , A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(snake_case )
A_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : List[str] = [*signature.parameters.keys()]
A_ : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
@slow
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Optional[Any] = ViTMSNModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def __snake_case ( ) -> Optional[Any]:
A_ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
torch.manual_seed(2 )
A_ : Any = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(snake_case )
A_ : List[str] = self.default_image_processor
A_ : int = prepare_img()
A_ : List[str] = image_processor(images=snake_case , return_tensors="pt" ).to(snake_case )
# forward pass
with torch.no_grad():
A_ : Optional[int] = model(**snake_case )
# verify the logits
A_ : List[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case )
A_ : int = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1e-4 ) )
| 300
| 1
|
from collections import defaultdict
class __magic_name__ :
"""simple docstring"""
def __init__( self :Tuple , snake_case :List[Any] , snake_case :Union[str, Any] ):
'''simple docstring'''
A_ : Optional[int] = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
A_ : str = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(snake_case ) )
]
A_ : Dict = defaultdict(snake_case ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
A_ : List[str] = (1 << len(snake_case )) - 1
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :Tuple , snake_case :List[str] ):
'''simple docstring'''
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
        # Number of ways when we don't assign this task to anyone in the arrangement
A_ : Any = self.count_ways_until(snake_case , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
A_ : int = total_ways_util
return self.dp[mask][task_no]
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :Optional[Any] ):
'''simple docstring'''
for i in range(len(snake_case ) ):
for j in task_performed[i]:
self.task[j].append(snake_case )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
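# Hypothetical illustration (not part of this class) of the mask bookkeeping above:
# bit p marks person p as already assigned, and final_mask has all person-bits set.
def _mask_sketch(num_persons: int = 3):
    final_mask = (1 << num_persons) - 1  # e.g. 0b111 for 3 persons
    mask = 0 | (1 << 1)  # assign person 1 -> 0b010
    assert mask & (1 << 1) and mask != final_mask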
if __name__ == "__main__":
_lowerCAmelCase : int = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
_lowerCAmelCase : int = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 300
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = (DDPMScheduler,)
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , **snake_case :str ):
'''simple docstring'''
A_ : Dict = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**snake_case )
return config
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=snake_case , beta_end=snake_case )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=snake_case )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
self.check_over_configs(thresholding=snake_case )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=snake_case , prediction_type=snake_case , sample_max_value=snake_case , )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Tuple = self.scheduler_classes[0]
A_ : List[str] = self.get_scheduler_config()
A_ : List[str] = scheduler_class(**snake_case )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : int = self.scheduler_classes[0]
A_ : List[str] = self.get_scheduler_config()
A_ : int = scheduler_class(**snake_case )
A_ : Tuple = len(snake_case )
A_ : List[str] = self.dummy_model()
A_ : Optional[Any] = self.dummy_sample_deter
A_ : List[str] = torch.manual_seed(0 )
for t in reversed(range(snake_case ) ):
# 1. predict noise residual
A_ : Tuple = model(snake_case , snake_case )
# 2. predict previous mean of sample x_t-1
A_ : Dict = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A_ : Optional[int] = pred_prev_sample
A_ : Tuple = torch.sum(torch.abs(snake_case ) )
A_ : str = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : Optional[int] = self.scheduler_classes[0]
A_ : int = self.get_scheduler_config(prediction_type="v_prediction" )
A_ : List[str] = scheduler_class(**snake_case )
A_ : int = len(snake_case )
A_ : Dict = self.dummy_model()
A_ : str = self.dummy_sample_deter
A_ : Any = torch.manual_seed(0 )
for t in reversed(range(snake_case ) ):
# 1. predict noise residual
A_ : Optional[int] = model(snake_case , snake_case )
# 2. predict previous mean of sample x_t-1
A_ : Tuple = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A_ : List[str] = pred_prev_sample
A_ : Optional[Any] = torch.sum(torch.abs(snake_case ) )
A_ : List[str] = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : str = self.scheduler_classes[0]
A_ : Optional[Any] = self.get_scheduler_config()
A_ : Dict = scheduler_class(**snake_case )
A_ : Optional[int] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=snake_case )
A_ : Optional[int] = scheduler.timesteps
for i, timestep in enumerate(snake_case ):
if i == len(snake_case ) - 1:
A_ : str = -1
else:
A_ : List[str] = timesteps[i + 1]
A_ : Optional[int] = scheduler.previous_timestep(snake_case )
A_ : List[str] = prev_t.item()
self.assertEqual(snake_case , snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : int = self.get_scheduler_config()
A_ : Tuple = scheduler_class(**snake_case )
A_ : List[str] = [100, 87, 50, 51, 0]
with self.assertRaises(snake_case , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Any = self.scheduler_classes[0]
A_ : Union[str, Any] = self.get_scheduler_config()
A_ : Optional[int] = scheduler_class(**snake_case )
A_ : Union[str, Any] = [100, 87, 50, 1, 0]
A_ : Optional[int] = len(snake_case )
with self.assertRaises(snake_case , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=snake_case , timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Union[str, Any] = self.scheduler_classes[0]
A_ : Optional[Any] = self.get_scheduler_config()
A_ : Optional[int] = scheduler_class(**snake_case )
A_ : Optional[int] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            snake_case , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=snake_case )
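# A minimal numpy sketch (an illustration under stated assumptions, not the
# scheduler's internal code) of the "fixed_small" posterior variance the variance
# test above exercises: beta_tilde_t = (1 - alphabar_{t-1}) / (1 - alphabar_t) * beta_t
# for the linear beta schedule defined in get_scheduler_config.
import numpy as np

_betas = np.linspace(0.0001, 0.02, 1_000)
_alphas_cumprod = np.cumprod(1.0 - _betas)

def _posterior_variance(t: int) -> float:
    alpha_prod_t = _alphas_cumprod[t]
    alpha_prod_t_prev = _alphas_cumprod[t - 1] if t > 0 else 1.0
    return (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * _betas[t]

# _posterior_variance(487) should land close to the 0.00979 asserted above.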
| 300
| 1
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = ['''pixel_values''']
def __init__( self :Optional[int] , snake_case :bool = True , snake_case :Optional[Dict[str, int]] = None , snake_case :PILImageResampling = PILImageResampling.BILINEAR , snake_case :bool = True , snake_case :Dict[str, int] = None , snake_case :bool = True , snake_case :Union[int, float] = 1 / 255 , snake_case :bool = True , snake_case :Optional[Union[float, List[float]]] = None , snake_case :Optional[Union[float, List[float]]] = None , **snake_case :List[str] , ):
'''simple docstring'''
super().__init__(**snake_case )
A_ : int = size if size is not None else {"shortest_edge": 256}
A_ : Optional[int] = get_size_dict(snake_case , default_to_square=snake_case )
A_ : str = crop_size if crop_size is not None else {"height": 224, "width": 224}
A_ : List[str] = get_size_dict(snake_case , param_name="crop_size" )
A_ : Optional[Any] = do_resize
A_ : str = size
A_ : List[str] = resample
A_ : Any = do_center_crop
A_ : List[str] = crop_size
A_ : Union[str, Any] = do_rescale
A_ : Optional[int] = rescale_factor
A_ : Dict = do_normalize
A_ : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A_ : str = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :np.ndarray , snake_case :Dict[str, int] , snake_case :PILImageResampling = PILImageResampling.BICUBIC , snake_case :Optional[Union[str, ChannelDimension]] = None , **snake_case :Optional[int] , ):
'''simple docstring'''
A_ : Union[str, Any] = get_size_dict(snake_case , default_to_square=snake_case )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
A_ : Union[str, Any] = get_resize_output_image_size(snake_case , size=size["shortest_edge"] , default_to_square=snake_case )
return resize(snake_case , size=snake_case , resample=snake_case , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE ( self :str , snake_case :np.ndarray , snake_case :Dict[str, int] , snake_case :Optional[Union[str, ChannelDimension]] = None , **snake_case :List[str] , ):
'''simple docstring'''
A_ : int = get_size_dict(snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(snake_case , size=(size["height"], size["width"]) , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :np.ndarray , snake_case :float , snake_case :Optional[Union[str, ChannelDimension]] = None , **snake_case :int ):
'''simple docstring'''
return rescale(snake_case , scale=snake_case , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE ( self :int , snake_case :np.ndarray , snake_case :Union[float, List[float]] , snake_case :Union[float, List[float]] , snake_case :Optional[Union[str, ChannelDimension]] = None , **snake_case :List[Any] , ):
'''simple docstring'''
return normalize(snake_case , mean=snake_case , std=snake_case , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :ImageInput , snake_case :Optional[bool] = None , snake_case :Dict[str, int] = None , snake_case :PILImageResampling = None , snake_case :bool = None , snake_case :Dict[str, int] = None , snake_case :Optional[bool] = None , snake_case :Optional[float] = None , snake_case :Optional[bool] = None , snake_case :Optional[Union[float, List[float]]] = None , snake_case :Optional[Union[float, List[float]]] = None , snake_case :Optional[Union[str, TensorType]] = None , snake_case :Union[str, ChannelDimension] = ChannelDimension.FIRST , **snake_case :Any , ):
'''simple docstring'''
A_ : Tuple = do_resize if do_resize is not None else self.do_resize
A_ : Optional[int] = size if size is not None else self.size
A_ : List[Any] = get_size_dict(snake_case , default_to_square=snake_case )
A_ : int = resample if resample is not None else self.resample
A_ : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : str = crop_size if crop_size is not None else self.crop_size
A_ : Any = get_size_dict(snake_case , param_name="crop_size" )
A_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
A_ : str = image_mean if image_mean is not None else self.image_mean
A_ : int = image_std if image_std is not None else self.image_std
A_ : Union[str, Any] = make_list_of_images(snake_case )
if not valid_images(snake_case ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
A_ : Dict = [to_numpy_array(snake_case ) for image in images]
if do_resize:
A_ : Optional[int] = [self.resize(image=snake_case , size=snake_case , resample=snake_case ) for image in images]
if do_center_crop:
A_ : str = [self.center_crop(image=snake_case , size=snake_case ) for image in images]
if do_rescale:
A_ : List[Any] = [self.rescale(image=snake_case , scale=snake_case ) for image in images]
if do_normalize:
A_ : List[Any] = [self.normalize(image=snake_case , mean=snake_case , std=snake_case ) for image in images]
A_ : Any = [to_channel_dimension_format(snake_case , snake_case ) for image in images]
A_ : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=snake_case , tensor_type=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Union[str, Any] , snake_case :List[Tuple] = None ):
'''simple docstring'''
A_ : List[str] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(snake_case ) != len(snake_case ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(snake_case ):
A_ : List[str] = target_sizes.numpy()
A_ : List[str] = []
for idx in range(len(snake_case ) ):
A_ : List[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=snake_case )
A_ : Union[str, Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(snake_case )
else:
A_ : Optional[int] = logits.argmax(dim=1 )
A_ : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
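# A hedged usage sketch of the post-processing method above (in transformers the
# public name is post_process_semantic_segmentation; treat the instance and shapes
# below as assumptions for illustration):
#   maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(512, 512)])
# `maps` is then a list with one (512, 512) label map per image, obtained by
# bilinearly resizing the logits and taking the argmax over the class dimension.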
| 300
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[int] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowerCAmelCase : int = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int ) -> List[Any]:
for attribute in key.split("." ):
A_ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
A_ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
A_ : Tuple = hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
A_ : Optional[int] = value
elif weight_type == "weight_g":
A_ : Optional[int] = value
elif weight_type == "weight_v":
A_ : Any = value
elif weight_type == "bias":
A_ : str = value
else:
A_ : Any = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict ) -> List[str]:
A_ : Optional[Any] = []
A_ : Any = fairseq_model.state_dict()
A_ : Union[str, Any] = hf_model.feature_extractor
    # if the encoder and decoder have different dims -> use proj_weight
A_ : str = None
for name, value in fairseq_dict.items():
A_ : Tuple = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , )
A_ : Optional[Any] = True
elif name.split("." )[0] == "proj":
A_ : Dict = fairseq_model.proj
A_ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
A_ : int = True
if "*" in mapped_key:
A_ : Optional[Any] = name.split(_lowerCAmelCase )[0].split("." )[-2]
A_ : int = mapped_key.replace("*" , _lowerCAmelCase )
if "weight_g" in name:
A_ : List[Any] = "weight_g"
elif "weight_v" in name:
A_ : List[Any] = "weight_v"
elif "bias" in name:
A_ : Dict = "bias"
elif "weight" in name:
A_ : List[Any] = "weight"
else:
A_ : Dict = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"Unused weights: {unused_weights}" )
return proj_weight
def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ) -> str:
A_ : Any = full_name.split("conv_layers." )[-1]
A_ : Optional[int] = name.split("." )
A_ : Optional[Any] = int(items[0] )
A_ : Union[str, Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
A_ : List[Any] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
A_ : int = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
A_ : List[Any] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
A_ : Tuple = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_lowerCAmelCase )
def __snake_case ( _lowerCAmelCase : Optional[int] ) -> str:
A_ , A_ : List[str] = emb.weight.shape
A_ : Optional[int] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase )
A_ : List[Any] = emb.weight.data
return lin_layer
def __snake_case ( _lowerCAmelCase : str ) -> Tuple:
with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f:
A_ : int = f.readlines()
A_ : Dict = [line.split(" " )[0] for line in lines]
A_ : Tuple = len(_lowerCAmelCase )
A_ : Union[str, Any] = {
"<s>": 0,
"<pad>": 1,
"</s>": 2,
"<unk>": 3,
}
vocab_dict.update(dict(zip(_lowerCAmelCase , range(4 , num_words + 4 ) ) ) )
return vocab_dict
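# Worked example for create_vocab_dict above (hypothetical dict file contents):
# a fairseq dict file whose lines are "hello 42" and "world 13" yields
#     {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}
# i.e. the four special tokens keep fairseq's fixed ids and the remaining words
# are numbered in file order starting at 4.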
@torch.no_grad()
def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , ) -> Tuple:
A_ : Optional[int] = WavaVecaConfig.from_pretrained(_lowerCAmelCase )
A_ : str = SpeechaTextaConfig.from_pretrained(
_lowerCAmelCase , vocab_size=_lowerCAmelCase , decoder_layers=_lowerCAmelCase , do_stable_layer_norm=_lowerCAmelCase )
A_ : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
A_ , A_ , A_ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
A_ : Union[str, Any] = model[0].eval()
# set weights for wav2vec2 encoder
A_ : Tuple = WavaVecaModel(_lowerCAmelCase )
A_ : str = recursively_load_weights_wavaveca(model.encoder , _lowerCAmelCase )
A_ : Tuple = SpeechaTextaForCausalLM(_lowerCAmelCase )
A_ , A_ : List[str] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowerCAmelCase )
# set output linear layer
unexpected_keys.remove("embed_out" )
A_ : Union[str, Any] = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
A_ : str = SpeechEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
A_ : Optional[Any] = False
# add projection layer
A_ : Optional[Any] = nn.Parameter(projection_layer.weight )
A_ : int = nn.Parameter(projection_layer.bias )
A_ : str = create_vocab_dict(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "vocab.json" ) , "w" ) as fp:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
A_ : Any = SpeechaTextaTokenizer(os.path.join(_lowerCAmelCase , "vocab.json" ) )
tokenizer.save_pretrained(_lowerCAmelCase )
A_ : Optional[int] = hf_wavavec.config.to_dict()
A_ : int = tokenizer.pad_token_id
A_ : List[str] = tokenizer.bos_token_id
A_ : List[str] = tokenizer.eos_token_id
A_ : List[str] = "speech_to_text_2"
A_ : Tuple = "wav2vec2"
A_ : str = SpeechEncoderDecoderConfig.from_dict(_lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
feature_extractor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=10_224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 300
| 1
|
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :Optional[Any] , snake_case :str ):
'''simple docstring'''
return f"gaussian_noise_s={seed}_shape={'_'.join([str(snake_case ) for s in shape] )}.npy"
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
def SCREAMING_SNAKE_CASE ( self :int , snake_case :str=0 , snake_case :Any=(4, 4, 64, 64) , snake_case :Tuple=False ):
'''simple docstring'''
A_ : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa
A_ : List[str] = jnp.array(load_hf_numpy(self.get_file_format(snake_case , snake_case ) ) , dtype=snake_case )
return image
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :List[str]=False , snake_case :Dict="CompVis/stable-diffusion-v1-4" ):
'''simple docstring'''
A_ : str = jnp.bfloataa if fpaa else jnp.floataa
A_ : str = "bf16" if fpaa else None
A_ , A_ : Dict = FlaxUNetaDConditionModel.from_pretrained(
snake_case , subfolder="unet" , dtype=snake_case , revision=snake_case )
return model, params
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :Tuple=0 , snake_case :List[str]=(4, 77, 768) , snake_case :Any=False ):
'''simple docstring'''
A_ : Any = jnp.bfloataa if fpaa else jnp.floataa
A_ : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(snake_case , snake_case ) ) , dtype=snake_case )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :int , snake_case :Optional[Any] , snake_case :str ):
'''simple docstring'''
A_ , A_ : List[Any] = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=snake_case )
A_ : Optional[int] = self.get_latents(snake_case , fpaa=snake_case )
A_ : Optional[int] = self.get_encoder_hidden_states(snake_case , fpaa=snake_case )
A_ : Optional[Any] = model.apply(
{"params": params} , snake_case , jnp.array(snake_case , dtype=jnp.intaa ) , encoder_hidden_states=snake_case , ).sample
assert sample.shape == latents.shape
A_ : List[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
A_ : List[str] = jnp.array(snake_case , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(snake_case , snake_case , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Any , snake_case :Optional[Any] , snake_case :List[Any] ):
'''simple docstring'''
A_ , A_ : Union[str, Any] = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=snake_case )
A_ : Optional[Any] = self.get_latents(snake_case , shape=(4, 4, 96, 96) , fpaa=snake_case )
A_ : str = self.get_encoder_hidden_states(snake_case , shape=(4, 77, 1_024) , fpaa=snake_case )
A_ : Tuple = model.apply(
{"params": params} , snake_case , jnp.array(snake_case , dtype=jnp.intaa ) , encoder_hidden_states=snake_case , ).sample
assert sample.shape == latents.shape
A_ : Optional[int] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
A_ : List[Any] = jnp.array(snake_case , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(snake_case , snake_case , atol=1e-2 )
| 300
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class __magic_name__ :
"""simple docstring"""
def __init__( self :Tuple , snake_case :Optional[Any] , snake_case :Tuple=13 , snake_case :Dict=7 , snake_case :List[Any]=True , snake_case :List[Any]=True , snake_case :Dict=True , snake_case :Any=True , snake_case :Optional[int]=99 , snake_case :Any=32 , snake_case :Dict=2 , snake_case :int=4 , snake_case :Optional[int]=37 , snake_case :List[str]="gelu" , snake_case :List[Any]=0.1 , snake_case :Optional[Any]=0.1 , snake_case :Tuple=512 , snake_case :Tuple=16 , snake_case :Tuple=2 , snake_case :Optional[int]=0.02 , snake_case :str=3 , snake_case :Optional[int]=4 , snake_case :List[str]=None , snake_case :Tuple=1_000 , ):
'''simple docstring'''
A_ : str = parent
A_ : str = batch_size
A_ : str = seq_length
A_ : Any = is_training
A_ : Any = use_input_mask
A_ : str = use_token_type_ids
A_ : Tuple = use_labels
A_ : Optional[Any] = vocab_size
A_ : Dict = hidden_size
A_ : str = num_hidden_layers
A_ : Dict = num_attention_heads
A_ : str = intermediate_size
A_ : int = hidden_act
A_ : List[Any] = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : Optional[Any] = max_position_embeddings
A_ : List[Any] = type_vocab_size
A_ : Any = type_sequence_label_size
A_ : Dict = initializer_range
A_ : Any = num_labels
A_ : Optional[int] = num_choices
A_ : Optional[Any] = scope
A_ : Any = range_bbox
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
A_ : str = bbox[i, j, 3]
A_ : Union[str, Any] = bbox[i, j, 1]
A_ : List[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
A_ : Any = bbox[i, j, 2]
A_ : Tuple = bbox[i, j, 0]
A_ : int = t
A_ : int = tf.convert_to_tensor(snake_case )
A_ : Any = None
if self.use_input_mask:
A_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : str = None
if self.use_token_type_ids:
A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ : Dict = None
A_ : List[Any] = None
A_ : List[str] = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ : str = ids_tensor([self.batch_size] , self.num_choices )
A_ : int = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self :str , snake_case :Dict , snake_case :Union[str, Any] , snake_case :int , snake_case :int , snake_case :Union[str, Any] , snake_case :Tuple , snake_case :Optional[int] , snake_case :List[Any] ):
'''simple docstring'''
A_ : Any = TFLayoutLMModel(config=snake_case )
A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case )
A_ : str = model(snake_case , snake_case , token_type_ids=snake_case )
A_ : List[Any] = model(snake_case , snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Any , snake_case :List[Any] , snake_case :List[str] , snake_case :Optional[Any] , snake_case :Dict , snake_case :Any , snake_case :Union[str, Any] , snake_case :List[Any] ):
'''simple docstring'''
A_ : Optional[int] = TFLayoutLMForMaskedLM(config=snake_case )
A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :Dict , snake_case :Tuple , snake_case :Tuple , snake_case :List[str] , snake_case :Tuple , snake_case :str , snake_case :Optional[int] , snake_case :Any ):
'''simple docstring'''
A_ : Union[str, Any] = self.num_labels
A_ : int = TFLayoutLMForSequenceClassification(config=snake_case )
A_ : Optional[int] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict , snake_case :str , snake_case :Optional[Any] , snake_case :int , snake_case :Any , snake_case :Tuple , snake_case :List[str] , snake_case :Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = self.num_labels
A_ : str = TFLayoutLMForTokenClassification(config=snake_case )
A_ : Union[str, Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[str] , snake_case :Optional[int] , snake_case :Union[str, Any] , snake_case :List[Any] , snake_case :int , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ):
'''simple docstring'''
A_ : Optional[Any] = TFLayoutLMForQuestionAnswering(config=snake_case )
A_ : List[Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : int = self.prepare_config_and_inputs()
        A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ : Union[str, Any] = config_and_inputs
A_ : Optional[Any] = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
__UpperCamelCase = (
{
'''feature-extraction''': TFLayoutLMModel,
'''fill-mask''': TFLayoutLMForMaskedLM,
'''text-classification''': TFLayoutLMForSequenceClassification,
'''token-classification''': TFLayoutLMForTokenClassification,
'''zero-shot''': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = True
__UpperCamelCase = 10
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : Tuple = TFLayoutLMModelTester(self )
A_ : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
@slow
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[str] = TFLayoutLMModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@unittest.skip("Onnx compliancy broke with TF 2.10" )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
pass
def __snake_case ( ) -> Optional[Any]:
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
A_ : int = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231
A_ : int = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
A_ : Union[str, Any] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
A_ : List[Any] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
A_ : Tuple = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
A_ : str = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" )
A_ , A_ , A_ , A_ , A_ : Tuple = prepare_layoutlm_batch_inputs()
# forward pass
A_ : Tuple = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case )
# test the sequence output on [0, :3, :3]
A_ : List[Any] = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case , atol=1e-3 ) )
# test the pooled output on [1, :3]
A_ : Optional[Any] = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , snake_case , atol=1e-3 ) )
@slow
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Union[str, Any] = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 )
A_ , A_ , A_ , A_ , A_ : Any = prepare_layoutlm_batch_inputs()
# forward pass
A_ : Dict = model(
input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
A_ : List[str] = outputs.loss
A_ : Union[str, Any] = (2,)
self.assertEqual(loss.shape , snake_case )
# test the shape of the logits
A_ : Tuple = outputs.logits
A_ : Tuple = (2, 2)
self.assertEqual(logits.shape , snake_case )
@slow
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : int = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 )
A_ , A_ , A_ , A_ , A_ : Optional[int] = prepare_layoutlm_batch_inputs()
# forward pass
A_ : Union[str, Any] = model(
input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
# test the shape of the logits
A_ : Dict = outputs.logits
A_ : List[Any] = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , snake_case )
@slow
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Optional[Any] = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" )
A_ , A_ , A_ , A_ , A_ : str = prepare_layoutlm_batch_inputs()
# forward pass
A_ : Union[str, Any] = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case )
# test the shape of the logits
A_ : Union[str, Any] = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , snake_case )
self.assertEqual(outputs.end_logits.shape , snake_case )
| 300
| 1
|
from math import ceil
def __snake_case ( _lowerCAmelCase : int = 1001 ) -> int:
A_ : Union[str, Any] = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
A_ : Union[str, Any] = 2 * i + 1
A_ : Optional[int] = 2 * i
A_ : Dict = total + 4 * odd**2 - 6 * even
return total
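# One way to see the 4 * odd**2 - 6 * even update: layer i of the number spiral is a
# square of side odd = 2*i + 1 whose four corners are odd**2, odd**2 - (odd - 1),
# odd**2 - 2*(odd - 1) and odd**2 - 3*(odd - 1); their sum is
# 4 * odd**2 - 6 * (odd - 1) = 4 * odd**2 - 6 * even, since odd - 1 = 2*i = even.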
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
_lowerCAmelCase : List[str] = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 300
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
_lowerCAmelCase : Optional[int] = '''
Human: <<task>>
Assistant: '''
_lowerCAmelCase : int = '''huggingface-tools/default-prompts'''
_lowerCAmelCase : Any = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict="run" ) -> List[Any]:
if prompt_or_repo_id is None:
A_ : Optional[int] = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , _lowerCAmelCase ) is not None:
return prompt_or_repo_id
A_ : Optional[Any] = cached_file(
_lowerCAmelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f:
return f.read()
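# A hedged usage sketch of the helper above (its public name in transformers is
# download_prompt; treat the exact name as an assumption here):
# - a string containing any whitespace is treated as a literal prompt and returned as-is;
# - anything else is treated as a dataset repo id, and the template for the given mode
#   ("chat_prompt_template.txt" or "run_prompt_template.txt") is downloaded and read.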
| 300
| 1
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self :Any , snake_case :pyspark.sql.DataFrame , snake_case :Optional[NamedSplit] = None , snake_case :Optional[Features] = None , snake_case :bool = True , snake_case :str = None , snake_case :bool = False , snake_case :str = None , snake_case :bool = True , snake_case :str = "arrow" , **snake_case :int , ):
'''simple docstring'''
super().__init__(
split=snake_case , features=snake_case , cache_dir=snake_case , keep_in_memory=snake_case , streaming=snake_case , **snake_case , )
A_ : Any = load_from_cache_file
A_ : Union[str, Any] = file_format
A_ : Tuple = Spark(
df=snake_case , features=snake_case , cache_dir=snake_case , working_dir=snake_case , **snake_case , )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
A_ : int = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=snake_case , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
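# A hedged usage sketch (the reader above is exposed as SparkDatasetReader in
# `datasets`; the exact public name and defaults are assumptions here):
#   ds = SparkDatasetReader(df, cache_dir="/tmp/hf_cache", file_format="arrow").read()
# With streaming enabled this returns an iterable dataset built directly from the
# pyspark DataFrame; otherwise the builder materializes the files first and loads them.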
| 300
|
def __snake_case ( _lowerCAmelCase : list ) -> list:
if len(_lowerCAmelCase ) <= 1:
return [tuple(_lowerCAmelCase )]
A_ : Tuple = []
def generate(_lowerCAmelCase : int , _lowerCAmelCase : list ):
A_ : List[str] = [0] * n
res.append(tuple(_lowerCAmelCase ) )
A_ : int = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
A_ , A_ : str = arr[i], arr[0]
else:
A_ , A_ : List[str] = arr[i], arr[c[i]]
res.append(tuple(_lowerCAmelCase ) )
c[i] += 1
A_ : Tuple = 0
else:
A_ : Dict = 0
i += 1
generate(len(_lowerCAmelCase ) , _lowerCAmelCase )
return res
if __name__ == "__main__":
_lowerCAmelCase : str = input('''Enter numbers separated by a comma:\n''').strip()
_lowerCAmelCase : str = [int(item) for item in user_input.split(''',''')]
print(heaps(arr))
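# Worked example: heaps([1, 2, 3]) visits all 3! = 6 permutations, each produced from
# the previous one by a single swap (Heap's algorithm):
#     [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]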
| 300
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : List[Any] = {
'''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = [
'''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Swinv2ForImageClassification''',
'''Swinv2ForMaskedImageModeling''',
'''Swinv2Model''',
'''Swinv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 300
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
_lowerCAmelCase : int = logging.get_logger(__name__)
_lowerCAmelCase : Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCAmelCase : List[Any] = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
_lowerCAmelCase : Any = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
__UpperCamelCase = RobertaTokenizer
def __init__( self :Dict , snake_case :List[str]=None , snake_case :List[Any]=None , snake_case :Union[str, Any]=None , snake_case :List[str]="replace" , snake_case :Tuple="<s>" , snake_case :Union[str, Any]="</s>" , snake_case :str="</s>" , snake_case :Union[str, Any]="<s>" , snake_case :int="<unk>" , snake_case :Tuple="<pad>" , snake_case :List[str]="<mask>" , snake_case :Any=False , snake_case :Union[str, Any]=True , **snake_case :Optional[int] , ):
'''simple docstring'''
super().__init__(
snake_case , snake_case , tokenizer_file=snake_case , errors=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , **snake_case , )
A_ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case ) != add_prefix_space:
A_ : Dict = getattr(snake_case , pre_tok_state.pop("type" ) )
A_ : Optional[int] = add_prefix_space
A_ : int = pre_tok_class(**snake_case )
A_ : Optional[int] = add_prefix_space
A_ : Optional[int] = "post_processor"
A_ : Dict = getattr(self.backend_tokenizer , snake_case , snake_case )
if tokenizer_component_instance:
A_ : Dict = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
A_ : List[Any] = tuple(state["sep"] )
if "cls" in state:
A_ : Optional[Any] = tuple(state["cls"] )
A_ : Tuple = False
if state.get("add_prefix_space" , snake_case ) != add_prefix_space:
A_ : List[Any] = add_prefix_space
A_ : Optional[int] = True
if state.get("trim_offsets" , snake_case ) != trim_offsets:
A_ : List[str] = trim_offsets
A_ : Any = True
if changes_to_apply:
A_ : Optional[Any] = getattr(snake_case , state.pop("type" ) )
A_ : Any = component_class(**snake_case )
setattr(self.backend_tokenizer , snake_case , snake_case )
@property
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :Dict ):
'''simple docstring'''
A_ : Dict = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else value
A_ : Any = value
def SCREAMING_SNAKE_CASE ( self :Dict , *snake_case :Tuple , **snake_case :Union[str, Any] ):
'''simple docstring'''
A_ : Any = kwargs.get("is_split_into_words" , snake_case )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE ( self :List[str] , *snake_case :str , **snake_case :Union[str, Any] ):
'''simple docstring'''
A_ : Any = kwargs.get("is_split_into_words" , snake_case )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :str , snake_case :Optional[str] = None ):
'''simple docstring'''
A_ : str = self._tokenizer.model.save(snake_case , name=snake_case )
return tuple(snake_case )
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :List[str] , snake_case :Optional[Any]=None ):
'''simple docstring'''
A_ : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :List[int] , snake_case :Optional[List[int]] = None ):
'''simple docstring'''
A_ : Any = [self.sep_token_id]
A_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
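# Sequence layout produced by the two methods above (the RoBERTa convention):
#     single sequence:    <s> A </s>
#     pair of sequences:  <s> A </s></s> B </s>
# and, as the token-type method shows, every position gets token_type_id 0 because
# RoBERTa does not use token type ids.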
| 300
| 1
|
import copy
import random
from transformers import CLIPTokenizer
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self :Optional[Any] , *snake_case :str , **snake_case :Optional[int] ):
'''simple docstring'''
super().__init__(*snake_case , **snake_case )
A_ : List[Any] = {}
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Dict , *snake_case :Union[str, Any] , **snake_case :Any ):
'''simple docstring'''
A_ : List[str] = super().add_tokens(snake_case , *snake_case , **snake_case )
if num_added_tokens == 0:
raise ValueError(
f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
" `placeholder_token` that is not already in the tokenizer." )
def SCREAMING_SNAKE_CASE ( self :int , snake_case :Tuple , *snake_case :Optional[Any] , snake_case :List[Any]=1 , **snake_case :str ):
'''simple docstring'''
A_ : str = []
if num_vec_per_token == 1:
self.try_adding_tokens(snake_case , *snake_case , **snake_case )
output.append(snake_case )
else:
A_ : List[Any] = []
for i in range(snake_case ):
A_ : Any = placeholder_token + f"_{i}"
self.try_adding_tokens(snake_case , *snake_case , **snake_case )
output.append(snake_case )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"The tokenizer already has placeholder token {token} that can get confused with"
f" {placeholder_token}keep placeholder tokens independent" )
A_ : str = output
def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :Tuple , snake_case :Optional[Any]=False , snake_case :Any=1.0 ):
'''simple docstring'''
if isinstance(snake_case , snake_case ):
A_ : Union[str, Any] = []
for i in range(len(snake_case ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=snake_case ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
A_ : Any = self.token_map[placeholder_token]
A_ : Any = tokens[: 1 + int(len(snake_case ) * prop_tokens_to_load )]
if vector_shuffle:
A_ : Union[str, Any] = copy.copy(snake_case )
random.shuffle(snake_case )
A_ : Tuple = text.replace(snake_case , " ".join(snake_case ) )
return text
def __call__( self :Dict , snake_case :List[str] , *snake_case :Dict , snake_case :List[str]=False , snake_case :Any=1.0 , **snake_case :Any ):
'''simple docstring'''
return super().__call__(
self.replace_placeholder_tokens_in_text(
snake_case , vector_shuffle=snake_case , prop_tokens_to_load=snake_case ) , *snake_case , **snake_case , )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Union[str, Any] , *snake_case :Any , snake_case :Any=False , snake_case :Tuple=1.0 , **snake_case :Optional[Any] ):
'''simple docstring'''
return super().encode(
self.replace_placeholder_tokens_in_text(
snake_case , vector_shuffle=snake_case , prop_tokens_to_load=snake_case ) , *snake_case , **snake_case , )
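# A hedged example of the multi-vector placeholder scheme above: registering
# "<cat-toy>" (a hypothetical placeholder) with num_vec_per_token=3 adds the tokens
# "<cat-toy>_0", "<cat-toy>_1" and "<cat-toy>_2", and
# replace_placeholder_tokens_in_text later rewrites "<cat-toy>" in a prompt into
# those tokens joined by spaces (optionally shuffled when vector_shuffle=True).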
| 300
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCAmelCase : int = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_lowerCAmelCase : Tuple = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_lowerCAmelCase : int = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[List[List[str]]] , snake_case :List[List[str]] , snake_case :int = 1 , snake_case :int = 4 , ):
'''simple docstring'''
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=snake_case , hypotheses=snake_case , min_len=snake_case , max_len=snake_case )
}
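# A minimal, self-contained sketch of the GLEU definition described in the docstring above:
# count matching n-grams between hypothesis and reference, then take min(recall, precision).
# The toy sentences and the unigram/bigram range are illustrative assumptions.
from collections import Counter

def _toy_gleu(hyp, ref, min_len=1, max_len=2):
    def ngrams(seq, n):
        return Counter(tuple(seq[i : i + n]) for i in range(len(seq) - n + 1))
    hyp_grams, ref_grams = Counter(), Counter()
    for n in range(min_len, max_len + 1):
        hyp_grams += ngrams(hyp, n)
        ref_grams += ngrams(ref, n)
    matches = sum((hyp_grams & ref_grams).values())  # multiset intersection = clipped matches
    precision = matches / sum(hyp_grams.values())
    recall = matches / sum(ref_grams.values())
    return min(recall, precision)
# _toy_gleu("the cat sat".split(), "the cat sat down".split()) -> 5/7 ≈ 0.714
# (5 matching n-grams; the hypothesis has 5 n-grams in total, the reference has 7)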
| 300
| 1
|
import operator as op
def __snake_case ( _lowerCAmelCase : Optional[int] ) -> Dict:
A_ : List[Any] = []
A_ : List[str] = lambda _lowerCAmelCase , _lowerCAmelCase : int(x / y ) # noqa: E731 integer division operation
A_ : int = {
"^": op.pow,
"*": op.mul,
"/": div,
"+": op.add,
"-": op.sub,
} # operators & their respective operation
# print table header
print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " )
print("-" * (30 + len(_lowerCAmelCase )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(_lowerCAmelCase ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(_lowerCAmelCase ) , sep=" | " )
else:
A_ : Union[str, Any] = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(_lowerCAmelCase ) , sep=" | " )
A_ : Tuple = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(_lowerCAmelCase ) , sep=" | " )
stack.append(
str(opr[x](int(_lowerCAmelCase ) , int(_lowerCAmelCase ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(_lowerCAmelCase ) , sep=" | " , )
return int(stack[0] )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''')
print('''\n\tResult = ''', solve(Postfix))
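# Worked example for the evaluator above (the input "5 6 9 * +" is illustrative, not from
# this file): 5, 6 and 9 are pushed; "*" pops 9 and 6 and pushes 6 * 9 = 54; "+" pops 54
# and 5 and pushes 5 + 54 = 59, so the result is 59.
# Equivalent call: __snake_case("5 6 9 * +".split(" "))  # -> 59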
| 300
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ) -> str:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] ) -> Optional[int]:
A_ : Tuple = tmp_path / "cache"
A_ : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : Optional[Any] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ) -> str:
A_ : List[Any] = tmp_path / "cache"
A_ : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : int = features.copy() if features else default_expected_features
A_ : str = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ : Union[str, Any] = ParquetDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any ) -> Optional[Any]:
A_ : Dict = tmp_path / "cache"
A_ : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : Optional[int] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , split=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] ) -> List[str]:
if issubclass(_lowerCAmelCase , _lowerCAmelCase ):
A_ : int = parquet_path
elif issubclass(_lowerCAmelCase , _lowerCAmelCase ):
A_ : Optional[int] = [parquet_path]
A_ : Optional[int] = tmp_path / "cache"
A_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : Optional[int] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]=("train",) ) -> Tuple:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
for split in splits:
A_ : List[str] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict ) -> Optional[int]:
A_ : Optional[Any] = tmp_path / "cache"
A_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : Union[str, Any] = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __snake_case ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : str ) -> Tuple:
A_ : Optional[Any] = tmp_path / "cache"
A_ : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : List[str] = features.copy() if features else default_expected_features
A_ : Tuple = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ : Optional[int] = ParquetDatasetReader({"train": parquet_path} , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Any ) -> Union[str, Any]:
if split:
A_ : Any = {split: parquet_path}
else:
A_ : Optional[Any] = "train"
A_ : str = {"train": parquet_path, "test": parquet_path}
A_ : Any = tmp_path / "cache"
A_ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : Dict = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] ) -> Dict:
A_ : List[str] = ParquetDatasetWriter(_lowerCAmelCase , tmp_path / "foo.parquet" )
assert writer.write() > 0
A_ : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" )
A_ : Dict = pf.read()
assert dataset.data.table == output_table
def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ) -> List[Any]:
A_ : Tuple = str(shared_datadir / "test_image_rgb.jpg" )
A_ : int = {"image": [image_path]}
A_ : Optional[Any] = Features({"image": Image()} )
A_ : Union[str, Any] = Dataset.from_dict(_lowerCAmelCase , features=_lowerCAmelCase )
A_ : Tuple = ParquetDatasetWriter(_lowerCAmelCase , tmp_path / "foo.parquet" )
assert writer.write() > 0
A_ : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
A_ : int = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=_lowerCAmelCase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ) -> Any:
assert get_writer_batch_size(_lowerCAmelCase ) == expected
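# A hedged end-to-end sketch tying the writer and reader above together. The toy columns are
# illustrative assumptions; `tmp_path` is the standard pytest fixture already used in this file.
def test_parquet_roundtrip_sketch(tmp_path):
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    assert ParquetDatasetWriter(ds, tmp_path / "demo.parquet").write() > 0
    reloaded = ParquetDatasetReader(str(tmp_path / "demo.parquet")).read()
    assert reloaded.column_names == ["col_1", "col_2"]
    assert reloaded.num_rows == 2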
| 300
| 1
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Dict = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
A_ : Tuple = Vector()
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : Optional[int] = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(snake_case ) , "(0,0,0,0,0,1)" )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : List[str] = Vector([1, 2, 3, 4] )
self.assertEqual(len(snake_case ) , 4 )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Union[str, Any] = Vector([1, 2] )
A_ : Dict = Vector([1, 2, 3, 4, 5] )
A_ : Optional[Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
A_ : Any = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Optional[Any] = Vector([1, 2, 3] )
A_ : int = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : List[Any] = Vector([1, 2, 3] )
A_ : Any = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = Vector([1, 2, 3] )
A_ : Dict = Vector([2, -1, 4] ) # for test of dot product
A_ : Tuple = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , "(3.0,6.0,9.0)" )
self.assertEqual((a * b) , 0 )
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
self.assertEqual(str(zero_vector(10 ) ).count("0" ) , 10 )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , "(0,1,0)" )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : Dict = Vector([1, 2, 3] )
A_ : Union[str, Any] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , snake_case , snake_case ) ) , "(3,4,7)" )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Optional[Any] = Vector([1, 0, 0, 0, 0, 0] )
A_ : Optional[Any] = x.copy()
self.assertEqual(str(snake_case ) , str(snake_case ) )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Dict = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(snake_case ) , "(0,1,0)" )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n" , str(snake_case ) )
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
A_ : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
A_ : Tuple = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(snake_case , snake_case ) )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
A_ : List[str] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(snake_case , snake_case ) )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : List[str] = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
A_ : List[str] = Vector([1, 2, 3] )
self.assertEqual("(14,32,50)" , str(a * x ) )
self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n" , str(a * 2 ) )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n" , str(snake_case ) )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.01 )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
A_ : List[str] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n" , str(a + b ) )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
A_ : Tuple = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n" , str(a - b ) )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
self.assertEqual(
"|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 300
|
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any]="shi-labs/oneformer_demo" ) -> int:
with open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) as f:
A_ : Optional[int] = json.load(_lowerCAmelCase )
A_ : Union[str, Any] = {}
A_ : Tuple = []
A_ : Optional[Any] = []
for key, info in class_info.items():
A_ : Tuple = info["name"]
class_names.append(info["name"] )
if info["isthing"]:
thing_ids.append(int(_lowerCAmelCase ) )
A_ : Optional[Any] = thing_ids
A_ : int = class_names
return metadata
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self :List[Any] , snake_case :List[str] , snake_case :int=7 , snake_case :Optional[int]=3 , snake_case :Union[str, Any]=30 , snake_case :Tuple=400 , snake_case :List[Any]=None , snake_case :Optional[Any]=True , snake_case :Tuple=True , snake_case :Dict=[0.5, 0.5, 0.5] , snake_case :Any=[0.5, 0.5, 0.5] , snake_case :Optional[int]=10 , snake_case :Tuple=False , snake_case :Optional[int]=255 , snake_case :Optional[Any]="shi-labs/oneformer_demo" , snake_case :Optional[Any]="ade20k_panoptic.json" , snake_case :Optional[int]=10 , ):
'''simple docstring'''
A_ : Tuple = parent
A_ : List[str] = batch_size
A_ : Optional[int] = num_channels
A_ : Tuple = min_resolution
A_ : List[Any] = max_resolution
A_ : Union[str, Any] = do_resize
A_ : Any = {"shortest_edge": 32, "longest_edge": 1_333} if size is None else size
A_ : Tuple = do_normalize
A_ : List[str] = image_mean
A_ : List[Any] = image_std
A_ : Union[str, Any] = class_info_file
A_ : List[Any] = prepare_metadata(snake_case , snake_case )
A_ : Tuple = num_text
A_ : str = repo_path
# for the post_process_functions
A_ : Any = 2
A_ : int = 10
A_ : Optional[int] = 10
A_ : Tuple = 3
A_ : Tuple = 4
A_ : str = num_labels
A_ : int = do_reduce_labels
A_ : List[Any] = ignore_index
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Any , snake_case :Any=False ):
'''simple docstring'''
if not batched:
A_ : List[str] = image_inputs[0]
if isinstance(snake_case , Image.Image ):
A_ , A_ : Dict = image.size
else:
A_ , A_ : Tuple = image.shape[1], image.shape[2]
if w < h:
A_ : str = int(self.size["shortest_edge"] * h / w )
A_ : Any = self.size["shortest_edge"]
elif w > h:
A_ : Optional[int] = self.size["shortest_edge"]
A_ : List[str] = int(self.size["shortest_edge"] * w / h )
else:
A_ : List[str] = self.size["shortest_edge"]
A_ : Optional[Any] = self.size["shortest_edge"]
else:
A_ : Tuple = []
for image in image_inputs:
A_ , A_ : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A_ : Tuple = max(snake_case , key=lambda snake_case : item[0] )[0]
A_ : Union[str, Any] = max(snake_case , key=lambda snake_case : item[1] )[1]
return expected_height, expected_width
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class __magic_name__ ( lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
__UpperCamelCase = image_processing_class
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : Union[str, Any] = OneFormerImageProcessorTester(self )
@property
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case , "image_mean" ) )
self.assertTrue(hasattr(snake_case , "image_std" ) )
self.assertTrue(hasattr(snake_case , "do_normalize" ) )
self.assertTrue(hasattr(snake_case , "do_resize" ) )
self.assertTrue(hasattr(snake_case , "size" ) )
self.assertTrue(hasattr(snake_case , "ignore_index" ) )
self.assertTrue(hasattr(snake_case , "class_info_file" ) )
self.assertTrue(hasattr(snake_case , "num_text" ) )
self.assertTrue(hasattr(snake_case , "repo_path" ) )
self.assertTrue(hasattr(snake_case , "metadata" ) )
self.assertTrue(hasattr(snake_case , "do_reduce_labels" ) )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , Image.Image )
# Test not batched input
A_ : str = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
A_ , A_ : str = self.image_processing_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ , A_ : Optional[Any] = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case )
A_ : List[str] = image_processor(
snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case , numpify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , np.ndarray )
# Test not batched input
A_ : List[str] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
A_ , A_ : List[str] = self.image_processing_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ , A_ : int = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case )
A_ : Optional[Any] = image_processor(
snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case , torchify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , torch.Tensor )
# Test not batched input
A_ : Any = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
A_ , A_ : Tuple = self.image_processing_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ , A_ : Tuple = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case )
A_ : Any = image_processor(
snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict=False , snake_case :str=False , snake_case :Dict="np" ):
'''simple docstring'''
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
A_ : Tuple = self.image_processing_tester.num_labels
A_ : str = None
A_ : Tuple = None
A_ : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case )
if with_segmentation_maps:
A_ : List[str] = num_labels
if is_instance_map:
A_ : List[str] = list(range(snake_case ) ) * 2
A_ : int = dict(enumerate(snake_case ) )
A_ : List[str] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
A_ : int = [Image.fromarray(snake_case ) for annotation in annotations]
A_ : List[str] = image_processor(
snake_case , ["semantic"] * len(snake_case ) , snake_case , return_tensors="pt" , instance_id_to_semantic_id=snake_case , pad_and_return_pixel_mask=snake_case , )
return inputs
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
def common(snake_case :Dict=False , snake_case :Optional[int]=None ):
A_ : Tuple = self.comm_get_image_processor_inputs(
with_segmentation_maps=snake_case , is_instance_map=snake_case , segmentation_type=snake_case )
A_ : Optional[Any] = inputs["mask_labels"]
A_ : List[Any] = inputs["class_labels"]
A_ : Optional[Any] = inputs["pixel_values"]
A_ : int = inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(snake_case , snake_case , snake_case ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensures padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(snake_case ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=snake_case )
common(is_instance_map=snake_case , segmentation_type="pil" )
common(is_instance_map=snake_case , segmentation_type="pil" )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Any = np.zeros((20, 50) )
A_ : List[str] = 1
A_ : int = 1
A_ : Optional[Any] = 1
A_ : Any = binary_mask_to_rle(snake_case )
self.assertEqual(len(snake_case ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Union[str, Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
A_ : Any = self.image_processing_tester.get_fake_oneformer_outputs()
        A_ : int = feature_extractor.post_process_semantic_segmentation(snake_case )
self.assertEqual(len(snake_case ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
A_ : Optional[int] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        A_ : List[Any] = feature_extractor.post_process_semantic_segmentation(snake_case , target_sizes=snake_case )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : List[str] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
A_ : str = self.image_processing_tester.get_fake_oneformer_outputs()
A_ : Optional[Any] = image_processor.post_process_instance_segmentation(snake_case , threshold=0 )
self.assertTrue(len(snake_case ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , snake_case )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Tuple = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
A_ : List[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
A_ : Optional[Any] = image_processor.post_process_panoptic_segmentation(snake_case , threshold=0 )
self.assertTrue(len(snake_case ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , snake_case )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 300
| 1
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __magic_name__ :
"""simple docstring"""
@staticmethod
def SCREAMING_SNAKE_CASE ( *snake_case :List[Any] , **snake_case :Any ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : Optional[int] = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
A_ : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ : Tuple = image_classifier(snake_case , candidate_labels=["a", "b", "c"] )
        # The floating scores are so close that we enter floating-point error territory, and the
        # order is not guaranteed across python and torch versions.
self.assertIn(
nested_simplify(snake_case ) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
A_ : Tuple = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(snake_case ) , [
[
{"score": 0.333, "label": ANY(snake_case )},
{"score": 0.333, "label": ANY(snake_case )},
{"score": 0.333, "label": ANY(snake_case )},
],
[
{"score": 0.333, "label": ANY(snake_case )},
{"score": 0.333, "label": ANY(snake_case )},
{"score": 0.333, "label": ANY(snake_case )},
],
[
{"score": 0.333, "label": ANY(snake_case )},
{"score": 0.333, "label": ANY(snake_case )},
{"score": 0.333, "label": ANY(snake_case )},
],
[
{"score": 0.333, "label": ANY(snake_case )},
{"score": 0.333, "label": ANY(snake_case )},
{"score": 0.333, "label": ANY(snake_case )},
],
[
{"score": 0.333, "label": ANY(snake_case )},
{"score": 0.333, "label": ANY(snake_case )},
{"score": 0.333, "label": ANY(snake_case )},
],
] , )
@require_tf
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
A_ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ : Any = image_classifier(snake_case , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(snake_case ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
A_ : Optional[int] = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(snake_case ) , [
[
{"score": 0.333, "label": ANY(snake_case )},
{"score": 0.333, "label": ANY(snake_case )},
{"score": 0.333, "label": ANY(snake_case )},
],
[
{"score": 0.333, "label": ANY(snake_case )},
{"score": 0.333, "label": ANY(snake_case )},
{"score": 0.333, "label": ANY(snake_case )},
],
[
{"score": 0.333, "label": ANY(snake_case )},
{"score": 0.333, "label": ANY(snake_case )},
{"score": 0.333, "label": ANY(snake_case )},
],
[
{"score": 0.333, "label": ANY(snake_case )},
{"score": 0.333, "label": ANY(snake_case )},
{"score": 0.333, "label": ANY(snake_case )},
],
[
{"score": 0.333, "label": ANY(snake_case )},
{"score": 0.333, "label": ANY(snake_case )},
{"score": 0.333, "label": ANY(snake_case )},
],
] , )
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
A_ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ : str = image_classifier(snake_case , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(snake_case ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ : str = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(snake_case ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : str = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
A_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ : Dict = image_classifier(snake_case , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(snake_case ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ : Dict = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(snake_case ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
| 300
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = '''data2vec-vision'''
def __init__( self :int , snake_case :Optional[int]=768 , snake_case :Any=12 , snake_case :Any=12 , snake_case :Tuple=3_072 , snake_case :Any="gelu" , snake_case :Tuple=0.0 , snake_case :int=0.0 , snake_case :Any=0.02 , snake_case :str=1e-12 , snake_case :List[str]=224 , snake_case :Dict=16 , snake_case :int=3 , snake_case :int=False , snake_case :str=False , snake_case :List[Any]=False , snake_case :Optional[Any]=False , snake_case :Tuple=0.1 , snake_case :Optional[Any]=0.1 , snake_case :Any=True , snake_case :Optional[Any]=[3, 5, 7, 11] , snake_case :Dict=[1, 2, 3, 6] , snake_case :int=True , snake_case :List[Any]=0.4 , snake_case :Any=256 , snake_case :Union[str, Any]=1 , snake_case :Union[str, Any]=False , snake_case :Any=255 , **snake_case :int , ):
'''simple docstring'''
super().__init__(**snake_case )
A_ : Dict = hidden_size
A_ : Tuple = num_hidden_layers
A_ : List[str] = num_attention_heads
A_ : Any = intermediate_size
A_ : Optional[Any] = hidden_act
A_ : Any = hidden_dropout_prob
A_ : List[str] = attention_probs_dropout_prob
A_ : Optional[Any] = initializer_range
A_ : List[str] = layer_norm_eps
A_ : str = image_size
A_ : Optional[int] = patch_size
A_ : int = num_channels
A_ : Optional[Any] = use_mask_token
A_ : Optional[Any] = use_absolute_position_embeddings
A_ : Optional[int] = use_relative_position_bias
A_ : Dict = use_shared_relative_position_bias
A_ : Any = layer_scale_init_value
A_ : Optional[Any] = drop_path_rate
A_ : Dict = use_mean_pooling
# decode head attributes (semantic segmentation)
A_ : Tuple = out_indices
A_ : Optional[Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
A_ : str = use_auxiliary_head
A_ : List[Any] = auxiliary_loss_weight
A_ : List[str] = auxiliary_channels
A_ : Dict = auxiliary_num_convs
A_ : List[str] = auxiliary_concat_input
A_ : Optional[int] = semantic_loss_ignore_index
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
return 1e-4
| 300
| 1
|
_lowerCAmelCase : Optional[Any] = 65_521
def __snake_case ( _lowerCAmelCase : str ) -> int:
A_ : str = 1
A_ : List[str] = 0
for plain_chr in plain_text:
A_ : Optional[Any] = (a + ord(_lowerCAmelCase )) % MOD_ADLER
A_ : Optional[Any] = (b + a) % MOD_ADLER
return (b << 16) | a
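# Sanity check for the checksum above: Adler-32 of the ASCII string "Wikipedia" is the
# well-known test vector 0x11E60398 (300286872), and the stdlib `zlib.adler32` computes
# the same value over bytes, so all three expressions below should agree.
import zlib
assert __snake_case("Wikipedia") == zlib.adler32(b"Wikipedia") == 0x11E60398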
| 300
|
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = ['''input_features''', '''attention_mask''']
def __init__( self :int , snake_case :int=80 , snake_case :Optional[int]=16_000 , snake_case :Tuple=0.0 , snake_case :Optional[int]=10 , snake_case :Optional[Any]=25 , snake_case :Dict="hamming_window" , snake_case :Tuple=32768.0 , snake_case :str=0.97 , snake_case :List[str]=1.0 , snake_case :Dict=True , snake_case :str=True , snake_case :Optional[Any]=False , **snake_case :Union[str, Any] , ):
'''simple docstring'''
super().__init__(feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , **snake_case )
A_ : Union[str, Any] = feature_size
A_ : int = sampling_rate
A_ : str = padding_value
A_ : int = hop_length
A_ : List[str] = win_length
A_ : Any = frame_signal_scale
A_ : str = preemphasis_coeff
A_ : List[str] = mel_floor
A_ : str = normalize_means
A_ : Any = normalize_vars
A_ : Optional[Any] = win_function
A_ : Dict = return_attention_mask
A_ : List[str] = win_length * sampling_rate // 1_000
A_ : List[str] = hop_length * sampling_rate // 1_000
A_ : List[str] = optimal_fft_length(self.sample_size )
A_ : str = (self.n_fft // 2) + 1
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :np.array ):
'''simple docstring'''
if self.win_function == "hamming_window":
A_ : Dict = window_function(window_length=self.sample_size , name=self.win_function , periodic=snake_case )
else:
A_ : List[str] = window_function(window_length=self.sample_size , name=self.win_function )
A_ : Optional[int] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
A_ : Tuple = spectrogram(
one_waveform * self.frame_signal_scale , window=snake_case , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=snake_case , preemphasis=self.preemphasis_coeff , mel_filters=snake_case , mel_floor=self.mel_floor , log_mel="log" , )
return msfc_features.T
def SCREAMING_SNAKE_CASE ( self :int , snake_case :Any , snake_case :Union[str, Any] , snake_case :str ):
'''simple docstring'''
if self.normalize_means:
A_ : int = x[:input_length].mean(axis=0 )
A_ : Any = np.subtract(snake_case , snake_case )
if self.normalize_vars:
A_ : List[Any] = x[:input_length].std(axis=0 )
A_ : Optional[int] = np.divide(snake_case , snake_case )
if input_length < x.shape[0]:
A_ : Optional[int] = padding_value
# make sure array is in float32
A_ : Union[str, Any] = x.astype(np.floataa )
return x
def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[np.ndarray] , snake_case :Optional[np.ndarray] = None ):
'''simple docstring'''
A_ : str = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(snake_case , snake_case , self.padding_value ) for x, n in zip(snake_case , snake_case )]
def __call__( self :int , snake_case :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , snake_case :Union[bool, str, PaddingStrategy] = False , snake_case :Optional[int] = None , snake_case :bool = False , snake_case :Optional[int] = None , snake_case :Optional[bool] = None , snake_case :Optional[Union[str, TensorType]] = None , snake_case :Optional[int] = None , **snake_case :Dict , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
A_ : Optional[int] = isinstance(snake_case , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
A_ : Optional[Any] = is_batched_numpy or (
isinstance(snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A_ : List[Any] = [np.asarray(snake_case , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(snake_case , np.ndarray ):
A_ : int = np.asarray(snake_case , dtype=np.floataa )
elif isinstance(snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A_ : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A_ : Tuple = [raw_speech]
# extract fbank features
A_ : int = [self._extract_mfsc_features(snake_case ) for one_waveform in raw_speech]
# convert into correct format for padding
A_ : Union[str, Any] = BatchFeature({"input_features": features} )
A_ : str = self.pad(
snake_case , padding=snake_case , max_length=snake_case , truncation=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , **snake_case , )
# make sure list is in array format
A_ : Optional[int] = padded_inputs.get("input_features" )
if isinstance(input_features[0] , snake_case ):
A_ : Union[str, Any] = [np.asarray(snake_case , dtype=np.floataa ) for feature in input_features]
A_ : Dict = padded_inputs.get("attention_mask" )
if attention_mask is not None:
A_ : Any = [np.asarray(snake_case , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
A_ : Dict = (
np.array(snake_case , dtype=np.intaa )
if self._get_padding_strategies(snake_case , max_length=snake_case ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
A_ : Optional[int] = self.normalize(
padded_inputs["input_features"] , attention_mask=snake_case )
if return_tensors is not None:
A_ : Dict = padded_inputs.convert_to_tensors(snake_case )
return padded_inputs
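# A hedged usage sketch for the feature extractor above, assuming the defaults (80 mel bins
# at 16 kHz); the one-second random waveform and the printed shape are illustrative:
# extractor = __magic_name__()
# waveform = np.random.randn(16_000).astype(np.float32)
# inputs = extractor(waveform, sampling_rate=16_000, padding=True, return_tensors="np")
# inputs["input_features"].shape  # -> (1, num_frames, 80)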
| 300
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : List[str] = tempfile.mkdtemp()
A_ : Optional[Any] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
A_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
A_ : Optional[int] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
}
A_ : List[Any] = os.path.join(self.tmpdirname , snake_case )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(snake_case , snake_case )
def SCREAMING_SNAKE_CASE ( self :Any , **snake_case :List[Any] ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case )
def SCREAMING_SNAKE_CASE ( self :Tuple , **snake_case :Optional[Any] ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] , **snake_case :Union[str, Any] ):
'''simple docstring'''
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **snake_case )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ : Any = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Tuple = self.get_tokenizer()
A_ : Optional[int] = self.get_rust_tokenizer()
A_ : Optional[Any] = self.get_image_processor()
A_ : Optional[int] = AlignProcessor(tokenizer=snake_case , image_processor=snake_case )
processor_slow.save_pretrained(self.tmpdirname )
A_ : Dict = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case )
A_ : List[Any] = AlignProcessor(tokenizer=snake_case , image_processor=snake_case )
processor_fast.save_pretrained(self.tmpdirname )
A_ : str = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , snake_case )
self.assertIsInstance(processor_fast.tokenizer , snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , snake_case )
self.assertIsInstance(processor_fast.image_processor , snake_case )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : Dict = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : List[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
A_ : Dict = self.get_image_processor(do_normalize=snake_case , padding_value=1.0 )
A_ : List[Any] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : str = self.get_image_processor()
A_ : Tuple = self.get_tokenizer()
A_ : int = AlignProcessor(tokenizer=snake_case , image_processor=snake_case )
A_ : List[Any] = self.prepare_image_inputs()
A_ : Dict = image_processor(snake_case , return_tensors="np" )
A_ : Any = processor(images=snake_case , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : Dict = AlignProcessor(tokenizer=snake_case , image_processor=snake_case )
A_ : Optional[int] = "lower newer"
A_ : str = processor(text=snake_case )
A_ : List[Any] = tokenizer(snake_case , padding="max_length" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : List[str] = self.get_image_processor()
A_ : Optional[int] = self.get_tokenizer()
A_ : Optional[int] = AlignProcessor(tokenizer=snake_case , image_processor=snake_case )
A_ : Any = "lower newer"
A_ : Optional[int] = self.prepare_image_inputs()
A_ : Any = processor(text=snake_case , images=snake_case )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(snake_case ):
processor()
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : Any = self.get_image_processor()
A_ : Dict = self.get_tokenizer()
A_ : Optional[int] = AlignProcessor(tokenizer=snake_case , image_processor=snake_case )
A_ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : str = processor.batch_decode(snake_case )
A_ : Dict = tokenizer.batch_decode(snake_case )
self.assertListEqual(snake_case , snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Dict = self.get_image_processor()
A_ : Tuple = self.get_tokenizer()
A_ : Dict = AlignProcessor(tokenizer=snake_case , image_processor=snake_case )
A_ : List[str] = "lower newer"
A_ : Union[str, Any] = self.prepare_image_inputs()
A_ : Tuple = processor(text=snake_case , images=snake_case )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
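    # A GPT-2 text decoder conditioned on a projected "prefix" embedding (e.g. a CLIP
    # feature), in the style of prefix-captioning models; the regex keys above are
    # attention biases that are ignored when loading weights.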
@register_to_config
def __init__( self :List[Any] , snake_case :int , snake_case :int , snake_case :Optional[int] = None , snake_case :int = 50_257 , snake_case :int = 1_024 , snake_case :int = 768 , snake_case :int = 12 , snake_case :int = 12 , snake_case :Optional[int] = None , snake_case :str = "gelu_new" , snake_case :float = 0.1 , snake_case :float = 0.1 , snake_case :float = 0.1 , snake_case :float = 1e-5 , snake_case :float = 0.02 , snake_case :bool = True , snake_case :bool = True , snake_case :bool = False , snake_case :bool = False , ):
'''simple docstring'''
super().__init__()
A_ : Tuple = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal." )
A_ : List[Any] = prefix_inner_dim
A_ : Union[str, Any] = prefix_hidden_dim
A_ : List[str] = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
A_ : List[Any] = (
nn.Linear(self.prefix_hidden_dim , snake_case ) if self.prefix_hidden_dim is not None else nn.Identity()
)
A_ : List[Any] = GPTaConfig(
vocab_size=snake_case , n_positions=snake_case , n_embd=snake_case , n_layer=snake_case , n_head=snake_case , n_inner=snake_case , activation_function=snake_case , resid_pdrop=snake_case , embd_pdrop=snake_case , attn_pdrop=snake_case , layer_norm_epsilon=snake_case , initializer_range=snake_case , scale_attn_weights=snake_case , use_cache=snake_case , scale_attn_by_inverse_layer_idx=snake_case , reorder_and_upcast_attn=snake_case , )
A_ : Optional[Any] = GPTaLMHeadModel(snake_case )
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.Tensor , snake_case :torch.Tensor , snake_case :Optional[torch.Tensor] = None , snake_case :Optional[torch.Tensor] = None , ):
'''simple docstring'''
A_ : Any = self.transformer.transformer.wte(snake_case )
A_ : str = self.encode_prefix(snake_case )
A_ : Union[str, Any] = self.decode_prefix(snake_case )
A_ : int = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
A_ : Dict = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
A_ : int = torch.cat((dummy_token, input_ids) , dim=1 )
A_ : Union[str, Any] = self.transformer(inputs_embeds=snake_case , labels=snake_case , attention_mask=snake_case )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def SCREAMING_SNAKE_CASE ( self :str , snake_case :int , snake_case :torch.device ):
'''simple docstring'''
        return torch.zeros(snake_case , self.prefix_length , dtype=torch.int64 , device=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :int ):
'''simple docstring'''
return self.encode_prefix(snake_case )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Dict , snake_case :Optional[int] , snake_case :Any ):
'''simple docstring'''
A_ : Any = torch.split(snake_case , 1 , dim=0 )
A_ : Optional[int] = []
A_ : Union[str, Any] = []
for feature in features:
A_ : Tuple = self.decode_prefix(feature.to(snake_case ) ) # back to the clip feature
# Only support beam search for now
A_ , A_ : Dict = self.generate_beam(
input_embeds=snake_case , device=snake_case , eos_token_id=snake_case )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
A_ : int = torch.stack(snake_case )
A_ : int = torch.stack(snake_case )
return generated_tokens, generated_seq_lengths
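    # The beam search below tracks per-beam cumulative log-probs (`scores`),
    # per-beam lengths (`seq_lengths`), and an `is_stopped` mask for beams that
    # have already emitted the EOS token.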
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :int=None , snake_case :str=None , snake_case :int=None , snake_case :int = 5 , snake_case :int = 67 , snake_case :float = 1.0 , snake_case :Optional[int] = None , ):
'''simple docstring'''
A_ : Optional[Any] = eos_token_id
A_ : List[Any] = None
A_ : List[Any] = None
A_ : str = torch.ones(snake_case , device=snake_case , dtype=torch.int )
A_ : Any = torch.zeros(snake_case , device=snake_case , dtype=torch.bool )
if input_embeds is not None:
A_ : Any = input_embeds
else:
A_ : Optional[Any] = self.transformer.transformer.wte(snake_case )
for i in range(snake_case ):
A_ : Optional[Any] = self.transformer(inputs_embeds=snake_case )
A_ : str = outputs.logits
A_ : int = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
A_ : List[str] = logits.softmax(-1 ).log()
if scores is None:
A_ , A_ : Union[str, Any] = logits.topk(snake_case , -1 )
A_ : Tuple = generated.expand(snake_case , *generated.shape[1:] )
A_ , A_ : str = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
A_ : Union[str, Any] = next_tokens
else:
A_ : List[str] = tokens.expand(snake_case , *tokens.shape[1:] )
A_ : Union[str, Any] = torch.cat((tokens, next_tokens) , dim=1 )
else:
A_ : List[str] = -float(np.inf )
A_ : List[Any] = 0
A_ : Union[str, Any] = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
A_ : Optional[Any] = scores_sum / seq_lengths[:, None]
A_ , A_ : List[str] = scores_sum_average.view(-1 ).topk(snake_case , -1 )
A_ : str = next_tokens // scores_sum.shape[1]
A_ : Union[str, Any] = seq_lengths[next_tokens_source]
A_ : Optional[int] = next_tokens % scores_sum.shape[1]
A_ : Tuple = next_tokens.unsqueeze(1 )
A_ : Tuple = tokens[next_tokens_source]
A_ : Dict = torch.cat((tokens, next_tokens) , dim=1 )
A_ : Dict = generated[next_tokens_source]
A_ : Union[str, Any] = scores_sum_average * seq_lengths
A_ : Optional[int] = is_stopped[next_tokens_source]
A_ : Tuple = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
A_ : Union[str, Any] = torch.cat((generated, next_token_embed) , dim=1 )
A_ : Any = is_stopped + next_tokens.eq(snake_case ).squeeze()
if is_stopped.all():
break
A_ : int = scores / seq_lengths
A_ : str = scores.argsort(descending=snake_case )
# tokens tensors are already padded to max_seq_length
A_ : Dict = [tokens[i] for i in order]
A_ : int = torch.stack(snake_case , dim=0 )
A_ : List[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def __snake_case ( _lowerCAmelCase : int = 8 ) -> str:
A_ : List[str] = ascii_letters + digits + punctuation
return "".join(secrets.choice(_lowerCAmelCase ) for _ in range(_lowerCAmelCase ) )
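# Usage sketch (output is random), assuming the original name `password_generator`:
#   password_generator(8) -> e.g. "aB3$kZ9!"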
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : int ) -> str:
    # Alternative password generator: combine the required characters (chars_incl)
    # with random letters, digits, and punctuation drawn via random(), then shuffle.
i -= len(_lowerCAmelCase )
A_ : Optional[Any] = i // 3
A_ : Any = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
A_ : Optional[int] = (
chars_incl
+ random(_lowerCAmelCase , quotient + remainder )
+ random(_lowerCAmelCase , _lowerCAmelCase )
+ random(_lowerCAmelCase , _lowerCAmelCase )
)
A_ : str = list(_lowerCAmelCase )
shuffle(_lowerCAmelCase )
return "".join(_lowerCAmelCase )
# random is a generalised function for letters, characters and numbers
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : int ) -> str:
return "".join(secrets.choice(_lowerCAmelCase ) for _ in range(_lowerCAmelCase ) )
# Minimal sketches of the three "Put your code here..." stubs (random_number,
# random_letters, random_characters); parameter names `chars` / `length` are assumed.
def __snake_case ( chars : str , length : int ) -> str:
    return "".join(secrets.choice(digits ) for _ in range(length ) )  # random digits
def __snake_case ( chars : str , length : int ) -> str:
    return "".join(secrets.choice(ascii_letters ) for _ in range(length ) )  # random letters
def __snake_case ( chars : str , length : int ) -> str:
    return "".join(secrets.choice(chars ) for _ in range(length ) )  # random draws from `chars`
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : int = 8 ) -> bool:
if len(_lowerCAmelCase ) < min_length:
# Your Password must be at least 8 characters long
return False
A_ : str = any(char in ascii_uppercase for char in password )
A_ : Optional[Any] = any(char in ascii_lowercase for char in password )
A_ : Optional[Any] = any(char in digits for char in password )
A_ : Optional[Any] = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain uppercase and lowercase letters,
# numbers, and special characters
def __snake_case ( ) -> Tuple:
A_ : List[Any] = int(input("Please indicate the max length of your password: " ).strip() )
A_ : Tuple = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(_lowerCAmelCase ) )
print(
"Alternative Password generated:" , alternative_password_generator(_lowerCAmelCase , _lowerCAmelCase ) , )
    print("[If you are thinking of using this password, you had better save it.]" )
if __name__ == "__main__":
main()
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self :Union[str, Any] , *snake_case :Tuple , **snake_case :Any ):
'''simple docstring'''
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , snake_case , )
super().__init__(*snake_case , **snake_case )
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : str = logging.get_logger()
# the current default level is logging.WARNING
A_ : int = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Optional[int] = logging.get_verbosity()
A_ : List[Any] = logging.get_logger("transformers.models.bart.tokenization_bart" )
A_ : str = "Testing 1, 2, 3"
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(snake_case ) as cl:
logger.warning(snake_case )
self.assertEqual(cl.out , msg + "\n" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(snake_case ) as cl:
logger.warning(snake_case )
self.assertEqual(cl.out , "" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(snake_case ) as cl:
logger.warning(snake_case )
self.assertEqual(cl.out , msg + "\n" )
# restore to the original level
logging.set_verbosity(snake_case )
@mockenv(TRANSFORMERS_VERBOSITY="error" )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
A_ : List[str] = logging.get_logger("transformers.models.bart.tokenization_bart" )
A_ : Optional[int] = os.getenv("TRANSFORMERS_VERBOSITY" , snake_case )
A_ : Optional[Any] = logging.log_levels[env_level_str]
A_ : Any = logging.get_verbosity()
self.assertEqual(
snake_case , snake_case , f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}" , )
# restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
A_ : Dict = logging.logging.getLogger()
with CaptureLogger(snake_case ) as cl:
# this action activates the env var
logging.get_logger("transformers.models.bart.tokenization_bart" )
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )
# no need to restore as nothing was changed
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
A_ : List[Any] = logging.get_logger("transformers.models.bart.tokenization_bart" )
A_ : Tuple = "Testing 1, 2, 3"
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
# nothing should be logged as env var disables this method
with CaptureLogger(snake_case ) as cl:
logger.warning_advice(snake_case )
self.assertEqual(cl.out , "" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(snake_case ) as cl:
logger.warning_advice(snake_case )
self.assertEqual(cl.out , msg + "\n" )
def __snake_case ( ) -> Tuple:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
from __future__ import annotations
def __snake_case ( _lowerCAmelCase : list[float] ) -> bool:
if len(_lowerCAmelCase ) < 2:
raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
if any(i <= 0 for i in nums ):
raise ValueError("All values must be greater than 0" )
A_ : List[str] = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
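# e.g. [6, 10, 5] -> True (10 < 5 + 6); [3, 7, 13, 2] -> False (13 >= 2 + 3 + 7)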
if __name__ == "__main__":
import doctest
doctest.testmod()
class __magic_name__ :
"""simple docstring"""
def __init__( self :Optional[int] ):
'''simple docstring'''
A_ : Optional[int] = 0
A_ : Optional[Any] = 0
A_ : int = {}
def SCREAMING_SNAKE_CASE ( self :str , snake_case :Any ):
'''simple docstring'''
if vertex not in self.adjacency:
A_ : Union[str, Any] = {}
self.num_vertices += 1
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Optional[Any] , snake_case :Optional[Any] , snake_case :int ):
'''simple docstring'''
self.add_vertex(snake_case )
self.add_vertex(snake_case )
if head == tail:
return
A_ : Any = weight
A_ : Optional[Any] = weight
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
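        # Make edge weights pairwise distinct by bumping duplicates upward, so the
        # cheapest-edge choice in the Borůvka pass below is unambiguous.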
A_ : Optional[int] = self.get_edges()
for edge in edges:
A_ , A_ , A_ : Optional[Any] = edge
edges.remove((tail, head, weight) )
for i in range(len(snake_case ) ):
A_ : List[str] = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
for i in range(len(snake_case ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
A_ : List[Any] = edges[i][2] + 1
for edge in edges:
A_ , A_ , A_ : Any = edge
A_ : List[Any] = weight
A_ : Optional[Any] = weight
def __str__( self :Tuple ):
'''simple docstring'''
A_ : Any = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
A_ : List[Any] = self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip("\n" )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : str = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def SCREAMING_SNAKE_CASE ( snake_case :str=None , snake_case :Tuple=None ):
'''simple docstring'''
A_ : Dict = Graph()
if vertices is None:
A_ : str = []
if edges is None:
A_ : Optional[Any] = []
for vertex in vertices:
g.add_vertex(snake_case )
for edge in edges:
g.add_edge(*snake_case )
return g
class __magic_name__ :
"""simple docstring"""
def __init__( self :Optional[Any] ):
'''simple docstring'''
A_ : Any = {}
A_ : Union[str, Any] = {}
def __len__( self :Optional[int] ):
'''simple docstring'''
return len(self.parent )
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :Any ):
'''simple docstring'''
if item in self.parent:
return self.find(snake_case )
A_ : Optional[int] = item
A_ : Dict = 0
return item
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Tuple ):
'''simple docstring'''
if item not in self.parent:
return self.make_set(snake_case )
if item != self.parent[item]:
A_ : Union[str, Any] = self.find(self.parent[item] )
return self.parent[item]
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :int , snake_case :List[str] ):
'''simple docstring'''
A_ : str = self.find(snake_case )
A_ : Optional[int] = self.find(snake_case )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
A_ : Tuple = roota
return roota
if self.rank[roota] < self.rank[roota]:
A_ : List[str] = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
A_ : str = roota
return roota
return None
@staticmethod
def SCREAMING_SNAKE_CASE ( snake_case :Optional[Any] ):
'''simple docstring'''
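        # Borůvka's MST algorithm: on each pass, pick the cheapest edge leaving every
        # component and merge components via union-find until only one remains.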
A_ : Optional[int] = graph.num_vertices
A_ : int = Graph.UnionFind()
A_ : Any = []
while num_components > 1:
A_ : Union[str, Any] = {}
for vertex in graph.get_vertices():
A_ : Optional[int] = -1
A_ : Optional[Any] = graph.get_edges()
for edge in edges:
A_ , A_ , A_ : Optional[Any] = edge
edges.remove((tail, head, weight) )
for edge in edges:
A_ , A_ , A_ : Optional[Any] = edge
A_ : Optional[int] = union_find.find(snake_case )
A_ : List[Any] = union_find.find(snake_case )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
A_ : Dict = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
A_ : List[str] = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
A_ , A_ , A_ : Optional[int] = cheap_edge[vertex]
if union_find.find(snake_case ) != union_find.find(snake_case ):
union_find.union(snake_case , snake_case )
mst_edges.append(cheap_edge[vertex] )
A_ : Optional[Any] = num_components - 1
A_ : Dict = Graph.build(edges=snake_case )
return mst
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self :Union[str, Any] , snake_case :AutoencoderKL , snake_case :CLIPTextModel , snake_case :CLIPTokenizer , snake_case :UNetaDConditionModel , snake_case :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , snake_case :StableDiffusionSafetyChecker , snake_case :CLIPImageProcessor , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , )
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A_ : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
self.enable_attention_slicing(snake_case )
@torch.no_grad()
def __call__( self :Any , snake_case :Union[str, List[str]] , snake_case :int = 512 , snake_case :int = 512 , snake_case :int = 50 , snake_case :float = 7.5 , snake_case :Optional[Union[str, List[str]]] = None , snake_case :Optional[int] = 1 , snake_case :float = 0.0 , snake_case :Optional[torch.Generator] = None , snake_case :Optional[torch.FloatTensor] = None , snake_case :Optional[str] = "pil" , snake_case :bool = True , snake_case :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case :int = 1 , snake_case :Optional[torch.FloatTensor] = None , **snake_case :Optional[Any] , ):
'''simple docstring'''
if isinstance(snake_case , snake_case ):
A_ : Dict = 1
elif isinstance(snake_case , snake_case ):
A_ : Optional[Any] = len(snake_case )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(snake_case )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case , snake_case ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(snake_case )}." )
# get prompt text embeddings
A_ : int = self.tokenizer(
snake_case , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
A_ : Dict = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A_ : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
A_ : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
A_ : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
A_ , A_ , A_ : int = text_embeddings.shape
A_ : List[str] = text_embeddings.repeat(1 , snake_case , 1 )
A_ : List[str] = text_embeddings.view(bs_embed * num_images_per_prompt , snake_case , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
A_ : Dict = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A_ : List[str]
if negative_prompt is None:
A_ : List[str] = [""]
elif type(snake_case ) is not type(snake_case ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(snake_case )} !="
f" {type(snake_case )}." )
elif isinstance(snake_case , snake_case ):
A_ : Optional[Any] = [negative_prompt]
elif batch_size != len(snake_case ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(snake_case )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
A_ : Any = negative_prompt
A_ : Optional[int] = text_input_ids.shape[-1]
A_ : Dict = self.tokenizer(
snake_case , padding="max_length" , max_length=snake_case , truncation=snake_case , return_tensors="pt" , )
A_ : Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A_ : Tuple = uncond_embeddings.shape[1]
A_ : Dict = uncond_embeddings.repeat(snake_case , snake_case , 1 )
A_ : Dict = uncond_embeddings.view(batch_size * num_images_per_prompt , snake_case , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A_ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
A_ : List[str] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
A_ : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
A_ : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A_ : Tuple = torch.randn(
snake_case , generator=snake_case , device="cpu" , dtype=snake_case ).to(self.device )
A_ : Optional[Any] = torch.randn(snake_case , generator=snake_case , device="cpu" , dtype=snake_case ).to(
self.device )
else:
A_ : int = torch.randn(
snake_case , generator=snake_case , device=self.device , dtype=snake_case )
A_ : Optional[int] = torch.randn(snake_case , generator=snake_case , device=self.device , dtype=snake_case )
else:
if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents_reference.shape}, expected {latents_shape}" )
A_ : Tuple = latents_reference.to(self.device )
A_ : Any = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
A_ : List[Any] = (latents_shape[3] - latents_shape_reference[3]) // 2
A_ : Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2
A_ : Optional[int] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
A_ : Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
A_ : Optional[Any] = 0 if dx < 0 else dx
A_ : Optional[Any] = 0 if dy < 0 else dy
A_ : List[str] = max(-dx , 0 )
A_ : List[Any] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
A_ : Any = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(snake_case )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
A_ : str = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A_ : Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A_ : Optional[int] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A_ : List[str] = {}
if accepts_eta:
A_ : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(snake_case ) ):
# expand the latents if we are doing classifier free guidance
A_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A_ : Any = self.scheduler.scale_model_input(snake_case , snake_case )
# predict the noise residual
A_ : List[str] = self.unet(snake_case , snake_case , encoder_hidden_states=snake_case ).sample
# perform guidance
if do_classifier_free_guidance:
A_ , A_ : Dict = noise_pred.chunk(2 )
A_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A_ : Tuple = self.scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case , snake_case , snake_case )
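        # decode latents to pixel space; 0.18215 is the Stable Diffusion VAE scaling factor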
A_ : List[str] = 1 / 0.18215 * latents
A_ : Tuple = self.vae.decode(snake_case ).sample
A_ : Dict = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
A_ : int = self.feature_extractor(self.numpy_to_pil(snake_case ) , return_tensors="pt" ).to(
self.device )
A_ , A_ : List[str] = self.safety_checker(
images=snake_case , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
A_ : List[str] = None
if output_type == "pil":
A_ : Optional[int] = self.numpy_to_pil(snake_case )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=snake_case , nsfw_content_detected=snake_case )
def __snake_case ( _lowerCAmelCase : int ) -> None:
A_ : Optional[int] = generate_pascal_triangle(_lowerCAmelCase )
for row_idx in range(_lowerCAmelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=" " )
else:
print(triangle[row_idx][col_idx] , end="" )
print()
def __snake_case ( _lowerCAmelCase : int ) -> list[list[int]]:
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
A_ : list[list[int]] = []
for current_row_idx in range(_lowerCAmelCase ):
A_ : List[str] = populate_current_row(_lowerCAmelCase , _lowerCAmelCase )
triangle.append(_lowerCAmelCase )
return triangle
def __snake_case ( _lowerCAmelCase : list[list[int]] , _lowerCAmelCase : int ) -> list[int]:
A_ : Dict = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
A_ , A_ : Any = 1, 1
for current_col_idx in range(1 , _lowerCAmelCase ):
calculate_current_element(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return current_row
def __snake_case ( _lowerCAmelCase : list[list[int]] , _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , ) -> None:
A_ : Optional[Any] = triangle[current_row_idx - 1][current_col_idx - 1]
A_ : str = triangle[current_row_idx - 1][current_col_idx]
A_ : List[Any] = above_to_left_elt + above_to_right_elt
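# Pascal's rule: each interior element is the sum of the two elements above it.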
def __snake_case ( _lowerCAmelCase : int ) -> list[list[int]]:
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
A_ : list[list[int]] = [[1]]
for row_index in range(1 , _lowerCAmelCase ):
A_ : str = [0] + result[-1] + [0]
A_ : Optional[Any] = row_index + 1
# Calculate the number of distinct elements in a row
A_ : Dict = sum(divmod(_lowerCAmelCase , 2 ) )
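        # sum(divmod(n, 2)) == n // 2 + n % 2 == ceil(n / 2): compute half the row, then mirror it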
A_ : Any = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
A_ : Optional[Any] = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
A_ : Union[str, Any] = row_first_half + row_second_half
result.append(_lowerCAmelCase )
return result
def __snake_case ( ) -> None:
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_lowerCAmelCase : Callable , _lowerCAmelCase : int ) -> None:
A_ : str = f"{func.__name__}({value})"
A_ : Optional[int] = timeit(f"__main__.{call}" , setup="import __main__" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(_lowerCAmelCase , _lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Any ) -> Dict:
A_ : Optional[Any] = nn.functional.normalize(_lowerCAmelCase )
A_ : List[str] = nn.functional.normalize(_lowerCAmelCase )
return torch.mm(_lowerCAmelCase , normalized_text_embeds.t() )
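# NB: despite the name, this returns cosine *similarity* (a normalized dot product).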
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = CLIPConfig
__UpperCamelCase = ['''CLIPEncoderLayer''']
def __init__( self :int , snake_case :CLIPConfig ):
'''simple docstring'''
super().__init__(snake_case )
A_ : int = CLIPVisionModel(config.vision_config )
A_ : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=snake_case )
A_ : Tuple = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=snake_case )
A_ : str = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=snake_case )
A_ : List[str] = nn.Parameter(torch.ones(17 ) , requires_grad=snake_case )
A_ : int = nn.Parameter(torch.ones(3 ) , requires_grad=snake_case )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Dict , snake_case :Any ):
'''simple docstring'''
A_ : List[Any] = self.vision_model(snake_case )[1] # pooled_output
A_ : List[Any] = self.visual_projection(snake_case )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : Optional[Any] = cosine_distance(snake_case , self.special_care_embeds ).cpu().float().numpy()
A_ : Tuple = cosine_distance(snake_case , self.concept_embeds ).cpu().float().numpy()
A_ : Union[str, Any] = []
A_ : Any = image_embeds.shape[0]
for i in range(snake_case ):
A_ : Optional[int] = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A_ : Optional[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
A_ : Optional[Any] = special_cos_dist[i][concept_idx]
A_ : Tuple = self.special_care_embeds_weights[concept_idx].item()
A_ : Union[str, Any] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]} )
A_ : Any = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
A_ : Tuple = cos_dist[i][concept_idx]
A_ : Tuple = self.concept_embeds_weights[concept_idx].item()
A_ : Tuple = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(snake_case )
result.append(snake_case )
A_ : Any = [len(res["bad_concepts"] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor ):
'''simple docstring'''
A_ : List[str] = self.vision_model(snake_case )[1] # pooled_output
A_ : int = self.visual_projection(snake_case )
A_ : Tuple = cosine_distance(snake_case , self.special_care_embeds )
A_ : Tuple = cosine_distance(snake_case , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A_ : Optional[Any] = 0.0
A_ : Tuple = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
A_ : Optional[Any] = torch.any(special_scores > 0 , dim=1 )
A_ : Optional[Any] = special_care * 0.01
A_ : Optional[int] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
A_ : Union[str, Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
A_ : Union[str, Any] = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
def __snake_case ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
A_ : Tuple = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> Dict:
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
A_ : str = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight" )
A_ : List[Any] = in_proj_weight[
: encoder_config.hidden_size, :
]
A_ : Optional[Any] = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
A_ : Optional[Any] = in_proj_weight[
-encoder_config.hidden_size :, :
]
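# (the fused qkv weight is split row-wise into equal query / key / value blocks of hidden_size rows)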
def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict ) -> Any:
A_ : Dict = dct.pop(_lowerCAmelCase )
A_ : List[Any] = val
def __snake_case ( _lowerCAmelCase : List[str] ) -> int:
if "handwritten" in checkpoint_url:
A_ : Any = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ : Any = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
A_ : List[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ).convert("RGB" )
return im
@torch.no_grad()
def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> List[Any]:
A_ : Optional[Any] = ViTConfig(image_size=384 , qkv_bias=_lowerCAmelCase )
A_ : Tuple = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
A_ : Tuple = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
A_ : Optional[Any] = 1024
A_ : Union[str, Any] = 4096
A_ : Union[str, Any] = 24
A_ : List[Any] = 16
A_ : List[str] = 1024
else:
raise ValueError("Should either find 'base' or 'large' in checkpoint URL" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ : Dict = False
A_ : int = "relu"
A_ : Optional[int] = 1024
A_ : Any = True
A_ : List[Any] = False
A_ : Optional[int] = False
# load HuggingFace model
A_ : Union[str, Any] = ViTModel(_lowerCAmelCase , add_pooling_layer=_lowerCAmelCase )
A_ : str = TrOCRForCausalLM(_lowerCAmelCase )
A_ : List[str] = VisionEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
model.eval()
# load state_dict of original model, rename some keys
A_ : Optional[int] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="cpu" , check_hash=_lowerCAmelCase )["model"]
A_ : Dict = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
A_ : Dict = state_dict.pop(_lowerCAmelCase )
if key.startswith("decoder" ) and "output_projection" not in key:
A_ : List[str] = val
else:
A_ : Optional[Any] = val
# load state dict
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image
A_ : List[Any] = ViTImageProcessor(size=encoder_config.image_size )
A_ : Any = RobertaTokenizer.from_pretrained("roberta-large" )
A_ : Union[str, Any] = TrOCRProcessor(_lowerCAmelCase , _lowerCAmelCase )
A_ : List[str] = processor(images=prepare_img(_lowerCAmelCase ) , return_tensors="pt" ).pixel_values
# verify logits
A_ : Union[str, Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
A_ : Optional[int] = model(pixel_values=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase )
A_ : Tuple = outputs.logits
A_ : Union[str, Any] = torch.Size([1, 1, 50265] )
if "trocr-base-handwritten" in checkpoint_url:
A_ : Union[str, Any] = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
A_ : str = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
A_ : Optional[Any] = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
A_ : Optional[int] = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , _lowerCAmelCase , atol=1e-3 ), "First elements of logits not as expected"
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCAmelCase )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_lowerCAmelCase : List[str] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
from collections.abc import Generator
def __snake_case ( ) -> Generator[int, None, None]:
A_ , A_ : int = 0, 1
while True:
A_ , A_ : List[str] = b, a + b
yield b
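# Project Euler 25 (presumably): find the index of the first Fibonacci number with n digits.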
def __snake_case ( _lowerCAmelCase : int = 1000 ) -> int:
A_ : int = 1
A_ : List[str] = fibonacci_generator()
while len(str(next(_lowerCAmelCase ) ) ) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = 42
__UpperCamelCase = 42
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = 1
@register_to_config
def __init__( self :Union[str, Any] , snake_case :int = 2_000 , snake_case :float = 0.15 , snake_case :float = 0.01 , snake_case :float = 1348.0 , snake_case :float = 1e-5 , snake_case :int = 1 , ):
'''simple docstring'''
A_ : Dict = sigma_max
# setable values
A_ : List[Any] = None
self.set_sigmas(snake_case , snake_case , snake_case , snake_case )
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :torch.FloatTensor , snake_case :Optional[int] = None ):
'''simple docstring'''
return sample
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :int , snake_case :float = None , snake_case :Union[str, torch.device] = None ):
'''simple docstring'''
A_ : Optional[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
A_ : Tuple = torch.linspace(1 , snake_case , snake_case , device=snake_case )
def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :int , snake_case :float = None , snake_case :float = None , snake_case :float = None ):
'''simple docstring'''
A_ : Union[str, Any] = sigma_min if sigma_min is not None else self.config.sigma_min
A_ : Any = sigma_max if sigma_max is not None else self.config.sigma_max
A_ : Dict = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(snake_case , snake_case )
A_ : str = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
A_ : Any = torch.exp(torch.linspace(math.log(snake_case ) , math.log(snake_case ) , snake_case ) )
A_ : str = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
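        # the schedule is geometric in t: sigma(t) = sigma_min * (sigma_max / sigma_min) ** t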
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :List[str] , snake_case :Dict ):
'''simple docstring'''
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :torch.FloatTensor , snake_case :int , snake_case :torch.FloatTensor , snake_case :Optional[torch.Generator] = None , snake_case :bool = True , ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
A_ : int = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
A_ : Optional[Any] = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
A_ : Dict = timesteps.to(self.discrete_sigmas.device )
A_ : Optional[int] = self.discrete_sigmas[timesteps].to(sample.device )
A_ : int = self.get_adjacent_sigma(snake_case , snake_case ).to(sample.device )
A_ : Union[str, Any] = torch.zeros_like(snake_case )
A_ : Tuple = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
A_ : Optional[int] = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
A_ : Tuple = diffusion.unsqueeze(-1 )
A_ : Optional[Any] = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
A_ : List[Any] = randn_tensor(
sample.shape , layout=sample.layout , generator=snake_case , device=sample.device , dtype=sample.dtype )
A_ : List[Any] = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
A_ : Any = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=snake_case , prev_sample_mean=snake_case )
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , snake_case :Optional[torch.Generator] = None , snake_case :bool = True , ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
A_ : Dict = randn_tensor(sample.shape , layout=sample.layout , generator=snake_case ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
A_ : int = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
A_ : List[Any] = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
A_ : Dict = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
A_ : Dict = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
A_ : int = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
A_ : str = step_size.unsqueeze(-1 )
A_ : Optional[Any] = sample + step_size * model_output
A_ : Tuple = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case )
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , ):
'''simple docstring'''
A_ : Union[str, Any] = timesteps.to(original_samples.device )
A_ : List[Any] = self.discrete_sigmas.to(original_samples.device )[timesteps]
A_ : List[Any] = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(snake_case ) * sigmas[:, None, None, None]
)
A_ : Optional[int] = noise + original_samples
return noisy_samples
def __len__( self :Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self :Union[str, Any] , snake_case :Any=0.01 , snake_case :List[Any]=1_000 ):
'''simple docstring'''
A_ : Optional[int] = p_stop
A_ : int = max_length
def __iter__( self :Dict ):
'''simple docstring'''
A_ : Optional[int] = 0
A_ : List[str] = False
while not stop and count < self.max_length:
yield count
count += 1
A_ : List[str] = random.random() < self.p_stop
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :Dict , snake_case :Dict , snake_case :List[Any]=False , snake_case :Optional[Any]=True ):
'''simple docstring'''
A_ : int = [
BatchSamplerShard(snake_case , 2 , snake_case , split_batches=snake_case , even_batches=snake_case )
for i in range(2 )
]
A_ : str = [list(snake_case ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(snake_case ) for shard in batch_sampler_shards] , [len(snake_case ) for e in expected] )
self.assertListEqual(snake_case , snake_case )
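        # Splits one batch sampler across 2 shards and checks each shard yields the expected
        # batches; with even_batches=True, samples wrap around so the shards match in length.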
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=snake_case )
A_ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(snake_case , snake_case )
A_ : Optional[int] = BatchSampler(range(24 ) , batch_size=3 , drop_last=snake_case )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case , snake_case )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
A_ : int = BatchSampler(range(21 ) , batch_size=3 , drop_last=snake_case )
A_ : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(snake_case , snake_case )
A_ : List[str] = BatchSampler(range(21 ) , batch_size=3 , drop_last=snake_case )
A_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case , snake_case )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batches.
A_ : Any = BatchSampler(range(22 ) , batch_size=3 , drop_last=snake_case )
A_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(snake_case , snake_case )
A_ : Any = BatchSampler(range(22 ) , batch_size=3 , drop_last=snake_case )
A_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case , snake_case )
        # Check the shards when the dataset is not a round multiple of batch size and does not have
        # a multiple of num_processes batches.
A_ : Optional[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=snake_case )
A_ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(snake_case , snake_case )
A_ : Union[str, Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=snake_case )
A_ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case , snake_case )
# Check the shards when the dataset is very small.
A_ : Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case )
A_ : Tuple = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(snake_case , snake_case )
A_ : Dict = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case )
A_ : List[Any] = [[], []]
self.check_batch_sampler_shards(snake_case , snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : List[str] = BatchSampler(range(24 ) , batch_size=4 , drop_last=snake_case )
A_ : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(snake_case , snake_case , split_batches=snake_case )
A_ : Optional[int] = BatchSampler(range(24 ) , batch_size=4 , drop_last=snake_case )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case , snake_case , split_batches=snake_case )
# Check the shards when the dataset is not a round multiple of batch size.
A_ : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=snake_case )
A_ : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(snake_case , snake_case , split_batches=snake_case )
A_ : str = BatchSampler(range(22 ) , batch_size=4 , drop_last=snake_case )
A_ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case , snake_case , split_batches=snake_case )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
A_ : Tuple = BatchSampler(range(21 ) , batch_size=4 , drop_last=snake_case )
A_ : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(snake_case , snake_case , split_batches=snake_case )
A_ : Union[str, Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=snake_case )
A_ : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case , snake_case , split_batches=snake_case )
# Check the shards when the dataset is very small.
A_ : Union[str, Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case )
A_ : str = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(snake_case , snake_case , split_batches=snake_case )
A_ : Tuple = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case )
A_ : Any = [[], []]
self.check_batch_sampler_shards(snake_case , snake_case , split_batches=snake_case )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=snake_case )
A_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(snake_case , snake_case , even_batches=snake_case )
A_ : Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=snake_case )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case , snake_case , even_batches=snake_case )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
A_ : Any = BatchSampler(range(21 ) , batch_size=3 , drop_last=snake_case )
A_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case , snake_case , even_batches=snake_case )
A_ : Optional[int] = BatchSampler(range(21 ) , batch_size=3 , drop_last=snake_case )
A_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case , snake_case , even_batches=snake_case )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batches.
A_ : Any = BatchSampler(range(22 ) , batch_size=3 , drop_last=snake_case )
A_ : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(snake_case , snake_case , even_batches=snake_case )
A_ : str = BatchSampler(range(22 ) , batch_size=3 , drop_last=snake_case )
A_ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case , snake_case , even_batches=snake_case )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
A_ : Optional[int] = BatchSampler(range(20 ) , batch_size=3 , drop_last=snake_case )
A_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case , snake_case , even_batches=snake_case )
A_ : Union[str, Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=snake_case )
A_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case , snake_case , even_batches=snake_case )
# Check the shards when the dataset is very small.
A_ : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case )
A_ : Tuple = [[[0, 1]], []]
self.check_batch_sampler_shards(snake_case , snake_case , even_batches=snake_case )
A_ : Any = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case )
A_ : int = [[], []]
self.check_batch_sampler_shards(snake_case , snake_case , even_batches=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=snake_case )
A_ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(snake_case , snake_case , split_batches=snake_case , even_batches=snake_case )
A_ : Optional[int] = BatchSampler(range(24 ) , batch_size=4 , drop_last=snake_case )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case , snake_case , split_batches=snake_case , even_batches=snake_case )
# Check the shards when the dataset is not a round multiple of batch size.
A_ : List[str] = BatchSampler(range(22 ) , batch_size=4 , drop_last=snake_case )
A_ : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case , snake_case , split_batches=snake_case , even_batches=snake_case )
A_ : List[str] = BatchSampler(range(22 ) , batch_size=4 , drop_last=snake_case )
A_ : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case , snake_case , split_batches=snake_case , even_batches=snake_case )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
A_ : Optional[int] = BatchSampler(range(21 ) , batch_size=4 , drop_last=snake_case )
A_ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case , snake_case , split_batches=snake_case , even_batches=snake_case )
A_ : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=snake_case )
A_ : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case , snake_case , split_batches=snake_case , even_batches=snake_case )
# Check the shards when the dataset is very small.
A_ : str = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case )
A_ : Optional[int] = [[[0, 1]], []]
self.check_batch_sampler_shards(snake_case , snake_case , split_batches=snake_case , even_batches=snake_case )
A_ : str = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case )
A_ : Any = [[], []]
self.check_batch_sampler_shards(snake_case , snake_case , split_batches=snake_case , even_batches=snake_case )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
A_ : Optional[int] = [BatchSamplerShard(snake_case , 2 , snake_case , even_batches=snake_case ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :Tuple , snake_case :List[str] , snake_case :List[Any] , snake_case :List[Any]=False , snake_case :Tuple=2 , snake_case :Any=False ):
'''simple docstring'''
random.seed(snake_case )
A_ : Any = list(snake_case )
A_ : str = [
IterableDatasetShard(
snake_case , batch_size=snake_case , drop_last=snake_case , num_processes=snake_case , process_index=snake_case , split_batches=snake_case , )
for i in range(snake_case )
]
A_ : Any = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(snake_case )
iterable_dataset_lists.append(list(snake_case ) )
A_ : Union[str, Any] = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
A_ : List[str] = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(snake_case ) , len(snake_case ) )
self.assertTrue(len(snake_case ) % shard_batch_size == 0 )
A_ : Optional[Any] = []
for idx in range(0 , len(snake_case ) , snake_case ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(snake_case ) < len(snake_case ):
reference += reference
self.assertListEqual(snake_case , reference[: len(snake_case )] )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = 42
A_ : Dict = RandomIterableDataset()
self.check_iterable_dataset_shards(snake_case , snake_case , batch_size=4 , drop_last=snake_case , split_batches=snake_case )
self.check_iterable_dataset_shards(snake_case , snake_case , batch_size=4 , drop_last=snake_case , split_batches=snake_case )
self.check_iterable_dataset_shards(snake_case , snake_case , batch_size=4 , drop_last=snake_case , split_batches=snake_case )
self.check_iterable_dataset_shards(snake_case , snake_case , batch_size=4 , drop_last=snake_case , split_batches=snake_case )
# Edge case with a very small dataset
A_ : Tuple = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(snake_case , snake_case , batch_size=4 , drop_last=snake_case , split_batches=snake_case )
self.check_iterable_dataset_shards(snake_case , snake_case , batch_size=4 , drop_last=snake_case , split_batches=snake_case )
self.check_iterable_dataset_shards(snake_case , snake_case , batch_size=4 , drop_last=snake_case , split_batches=snake_case )
self.check_iterable_dataset_shards(snake_case , snake_case , batch_size=4 , drop_last=snake_case , split_batches=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Optional[int] = BatchSampler(range(16 ) , batch_size=4 , drop_last=snake_case )
A_ : int = SkipBatchSampler(snake_case , 2 )
self.assertListEqual(list(snake_case ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : Dict = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = DataLoader(list(range(16 ) ) , batch_size=4 )
A_ : Tuple = skip_first_batches(snake_case , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : Optional[int] = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(snake_case ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(snake_case ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
Accelerator()
A_ : str = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(snake_case ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(snake_case ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
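# A minimal sketch (not part of the original test suite) of the behaviour the expectations
# above encode: BatchSamplerShard deals batches out round-robin across processes and, with
# the default even_batches=True, wraps around to the start so every shard yields the same
# number of batches. Import paths follow the accelerate conventions assumed by these tests.
from torch.utils.data import BatchSampler as _BatchSampler
from accelerate.data_loader import BatchSamplerShard as _BatchSamplerShard

_sampler = _BatchSampler(range(21), batch_size=3, drop_last=False)
_shards = [_BatchSamplerShard(_sampler, 2, i) for i in range(2)]
assert list(_shards[0]) == [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]
assert list(_shards[1]) == [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]]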
| 300
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
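# The update rule implemented above is x_{n+1} = x_n - f(x_n) / f'(x_n).
# Worked step for f(x) = x**2 - 5 starting at x_0 = 2: f(2) = -1, f'(2) = 4,
# so x_1 = 2 - (-1) / 4 = 2.25, already close to sqrt(5) ~= 2.2360679...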
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
    # Find value of e (the root of log(x) - 1 = 0)
print(F'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
| 300
| 1
|
import math
def perfect_square(num: int) -> bool:
    # Naive check; float round-off in math.sqrt can misclassify very large inputs.
    return math.sqrt(num) * math.sqrt(num) == num
def perfect_square_binary_search(n: int) -> bool:
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
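# Quick sanity check (illustrative values, not part of the original doctests):
#   perfect_square_binary_search(16) -> True   (4 * 4)
#   perfect_square_binary_search(17) -> False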
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_lowerCAmelCase : List[Any] = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_lowerCAmelCase : Union[str, Any] = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The lengths of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_lowerCAmelCase : Optional[Any] = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Optional[int] , snake_case :List[Any] , snake_case :bool = False , snake_case :bool = False , snake_case :bool = False , snake_case :bool = False , ):
'''simple docstring'''
A_ : List[str] = len(references[0] )
if any(len(snake_case ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
A_ : int = [[refs[i] for refs in references] for i in range(snake_case )]
A_ : Optional[Any] = TER(
normalized=snake_case , no_punct=snake_case , asian_support=snake_case , case_sensitive=snake_case , )
A_ : List[Any] = sb_ter.corpus_score(snake_case , snake_case )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 300
| 1
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 300
|
def euclidean_distance_sqr(pointa, pointb) -> float:
    return (pointa[0] - pointb[0]) ** 2 + (pointa[1] - pointb[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    # brute force over all pairs (used for small point sets)
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    # each point only needs to be checked against a handful of strip neighbours
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
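# The recursion above is the classic divide-and-conquer closest-pair scheme: split on the
# median x-coordinate, solve each half, then only re-examine points whose x lies within the
# current best distance of the dividing line (the "cross strip"), comparing each strip point
# against at most a constant number of neighbours -- O(n log n) overall.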
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('''Distance:''', closest_pair_of_points(points, len(points)))
| 300
| 1
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = 42
__UpperCamelCase = 42
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = 1
@register_to_config
def __init__( self :Union[str, Any] , snake_case :int = 2_000 , snake_case :float = 0.15 , snake_case :float = 0.01 , snake_case :float = 1348.0 , snake_case :float = 1e-5 , snake_case :int = 1 , ):
'''simple docstring'''
A_ : Dict = sigma_max
# setable values
A_ : List[Any] = None
self.set_sigmas(snake_case , snake_case , snake_case , snake_case )
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :torch.FloatTensor , snake_case :Optional[int] = None ):
'''simple docstring'''
return sample
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :int , snake_case :float = None , snake_case :Union[str, torch.device] = None ):
'''simple docstring'''
A_ : Optional[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
A_ : Tuple = torch.linspace(1 , snake_case , snake_case , device=snake_case )
def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :int , snake_case :float = None , snake_case :float = None , snake_case :float = None ):
'''simple docstring'''
A_ : Union[str, Any] = sigma_min if sigma_min is not None else self.config.sigma_min
A_ : Any = sigma_max if sigma_max is not None else self.config.sigma_max
A_ : Dict = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(snake_case , snake_case )
A_ : str = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
A_ : Any = torch.exp(torch.linspace(math.log(snake_case ) , math.log(snake_case ) , snake_case ) )
A_ : str = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :List[str] , snake_case :Dict ):
'''simple docstring'''
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :torch.FloatTensor , snake_case :int , snake_case :torch.FloatTensor , snake_case :Optional[torch.Generator] = None , snake_case :bool = True , ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
A_ : int = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
A_ : Optional[Any] = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
A_ : Dict = timesteps.to(self.discrete_sigmas.device )
A_ : Optional[int] = self.discrete_sigmas[timesteps].to(sample.device )
A_ : int = self.get_adjacent_sigma(snake_case , snake_case ).to(sample.device )
A_ : Union[str, Any] = torch.zeros_like(snake_case )
A_ : Tuple = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
A_ : Optional[int] = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
A_ : Tuple = diffusion.unsqueeze(-1 )
A_ : Optional[Any] = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
A_ : List[Any] = randn_tensor(
sample.shape , layout=sample.layout , generator=snake_case , device=sample.device , dtype=sample.dtype )
A_ : List[Any] = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
A_ : Any = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=snake_case , prev_sample_mean=snake_case )
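    # Typical predictor-corrector loop (a sketch under assumed diffusers conventions, where
    # these two methods are originally named `step_pred` and `step_correct`): for each
    # timestep, run a few Langevin corrector steps, then one reverse-SDE Euler predictor step:
    #     for t in scheduler.timesteps:
    #         for _ in range(scheduler.config.correct_steps):  # corrector
    #             sample = scheduler.step_correct(model(sample, t).sample, sample).prev_sample
    #         sample = scheduler.step_pred(model(sample, t).sample, t, sample).prev_sample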
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , snake_case :Optional[torch.Generator] = None , snake_case :bool = True , ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        # For small batch sizes, the paper suggests replacing norm(z) with sqrt(d), where d is the dim. of z
# sample noise for correction
A_ : Dict = randn_tensor(sample.shape , layout=sample.layout , generator=snake_case ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
A_ : int = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
A_ : List[Any] = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
A_ : Dict = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
A_ : Dict = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
A_ : int = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
A_ : str = step_size.unsqueeze(-1 )
A_ : Optional[Any] = sample + step_size * model_output
A_ : Tuple = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case )
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , snake_case :torch.FloatTensor , ):
'''simple docstring'''
A_ : Union[str, Any] = timesteps.to(original_samples.device )
A_ : List[Any] = self.discrete_sigmas.to(original_samples.device )[timesteps]
A_ : List[Any] = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(snake_case ) * sigmas[:, None, None, None]
)
A_ : Optional[int] = noise + original_samples
return noisy_samples
def __len__( self :Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 300
|
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __magic_name__ :
"""simple docstring"""
def __init__( self :Dict , snake_case :Optional[int] , snake_case :Tuple=13 , snake_case :List[Any]=30 , snake_case :Union[str, Any]=2 , snake_case :List[Any]=3 , snake_case :Tuple=True , snake_case :Dict=True , snake_case :Dict=32 , snake_case :List[str]=5 , snake_case :Optional[Any]=4 , snake_case :Any=37 , snake_case :Dict="gelu" , snake_case :List[str]=0.1 , snake_case :str=0.1 , snake_case :Tuple=10 , snake_case :str=0.02 , snake_case :Optional[Any]=None , ):
'''simple docstring'''
A_ : Tuple = parent
A_ : int = batch_size
A_ : List[str] = image_size
A_ : List[Any] = patch_size
A_ : Optional[Any] = num_channels
A_ : List[Any] = is_training
A_ : Tuple = use_labels
A_ : Union[str, Any] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Any = num_attention_heads
A_ : List[str] = intermediate_size
A_ : Optional[int] = hidden_act
A_ : List[str] = hidden_dropout_prob
A_ : str = attention_probs_dropout_prob
A_ : Any = type_sequence_label_size
A_ : List[str] = initializer_range
A_ : Dict = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A_ : Optional[int] = (image_size // patch_size) ** 2
A_ : List[str] = num_patches + 1
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Tuple = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Dict = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :List[Any] , snake_case :str , snake_case :Tuple ):
'''simple docstring'''
A_ : Optional[Any] = ViTMSNModel(config=snake_case )
model.to(snake_case )
model.eval()
A_ : int = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self :int , snake_case :Optional[int] , snake_case :List[str] , snake_case :List[str] ):
'''simple docstring'''
A_ : Dict = self.type_sequence_label_size
A_ : Tuple = ViTMSNForImageClassification(snake_case )
model.to(snake_case )
model.eval()
A_ : Union[str, Any] = model(snake_case , labels=snake_case )
print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" )
print("Labels: {labels}" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ : Union[str, Any] = 1
A_ : int = ViTMSNForImageClassification(snake_case )
model.to(snake_case )
model.eval()
A_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : Optional[Any] = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : List[str] = self.prepare_config_and_inputs()
A_ , A_ , A_ : Optional[int] = config_and_inputs
A_ : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__UpperCamelCase = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = ViTMSNModelTester(self )
A_ : str = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ , A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[int] = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ , A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(snake_case )
A_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : List[str] = [*signature.parameters.keys()]
A_ : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
@slow
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Optional[Any] = ViTMSNModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def __snake_case ( ) -> Optional[Any]:
A_ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
torch.manual_seed(2 )
A_ : Any = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(snake_case )
A_ : List[str] = self.default_image_processor
A_ : int = prepare_img()
A_ : List[str] = image_processor(images=snake_case , return_tensors="pt" ).to(snake_case )
# forward pass
with torch.no_grad():
A_ : Optional[int] = model(**snake_case )
# verify the logits
A_ : List[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case )
A_ : int = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1e-4 ) )
| 300
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase : List[Any] = {
'''configuration_rembert''': ['''REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RemBertConfig''', '''RemBertOnnxConfig''']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = ['''RemBertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Any = ['''RemBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[Any] = [
'''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RemBertForCausalLM''',
'''RemBertForMaskedLM''',
'''RemBertForMultipleChoice''',
'''RemBertForQuestionAnswering''',
'''RemBertForSequenceClassification''',
'''RemBertForTokenClassification''',
'''RemBertLayer''',
'''RemBertModel''',
'''RemBertPreTrainedModel''',
'''load_tf_weights_in_rembert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Dict = [
'''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRemBertForCausalLM''',
'''TFRemBertForMaskedLM''',
'''TFRemBertForMultipleChoice''',
'''TFRemBertForQuestionAnswering''',
'''TFRemBertForSequenceClassification''',
'''TFRemBertForTokenClassification''',
'''TFRemBertLayer''',
'''TFRemBertModel''',
'''TFRemBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
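# Sketch of the lazy-import behaviour wired up above (assuming transformers' _LazyModule
# semantics): importing the package is cheap, and an attribute access such as
# `from transformers.models.rembert import RemBertModel` only triggers the real
# `modeling_rembert` import at that point, provided torch is available.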
| 300
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = (DDPMScheduler,)
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , **snake_case :str ):
'''simple docstring'''
A_ : Dict = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**snake_case )
return config
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=snake_case , beta_end=snake_case )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=snake_case )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
self.check_over_configs(thresholding=snake_case )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=snake_case , prediction_type=snake_case , sample_max_value=snake_case , )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Tuple = self.scheduler_classes[0]
A_ : List[str] = self.get_scheduler_config()
A_ : List[str] = scheduler_class(**snake_case )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
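        # The asserted values follow the DDPM posterior variance used by "fixed_small":
        # beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t (Ho et al. 2020, Eq. 7),
        # which is 0 at t = 0 and approaches beta_t (here beta_end = 0.02) at the final timestep.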
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : int = self.scheduler_classes[0]
A_ : List[str] = self.get_scheduler_config()
A_ : int = scheduler_class(**snake_case )
A_ : Tuple = len(snake_case )
A_ : List[str] = self.dummy_model()
A_ : Optional[Any] = self.dummy_sample_deter
A_ : List[str] = torch.manual_seed(0 )
for t in reversed(range(snake_case ) ):
# 1. predict noise residual
A_ : Tuple = model(snake_case , snake_case )
# 2. predict previous mean of sample x_t-1
A_ : Dict = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A_ : Optional[int] = pred_prev_sample
A_ : Tuple = torch.sum(torch.abs(snake_case ) )
A_ : str = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : Optional[int] = self.scheduler_classes[0]
A_ : int = self.get_scheduler_config(prediction_type="v_prediction" )
A_ : List[str] = scheduler_class(**snake_case )
A_ : int = len(snake_case )
A_ : Dict = self.dummy_model()
A_ : str = self.dummy_sample_deter
A_ : Any = torch.manual_seed(0 )
for t in reversed(range(snake_case ) ):
# 1. predict noise residual
A_ : Optional[int] = model(snake_case , snake_case )
# 2. predict previous mean of sample x_t-1
A_ : Tuple = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A_ : List[str] = pred_prev_sample
A_ : Optional[Any] = torch.sum(torch.abs(snake_case ) )
A_ : List[str] = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : str = self.scheduler_classes[0]
A_ : Optional[Any] = self.get_scheduler_config()
A_ : Dict = scheduler_class(**snake_case )
A_ : Optional[int] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=snake_case )
A_ : Optional[int] = scheduler.timesteps
for i, timestep in enumerate(snake_case ):
if i == len(snake_case ) - 1:
A_ : str = -1
else:
A_ : List[str] = timesteps[i + 1]
A_ : Optional[int] = scheduler.previous_timestep(snake_case )
A_ : List[str] = prev_t.item()
self.assertEqual(snake_case , snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : int = self.get_scheduler_config()
A_ : Tuple = scheduler_class(**snake_case )
A_ : List[str] = [100, 87, 50, 51, 0]
with self.assertRaises(snake_case , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Any = self.scheduler_classes[0]
A_ : Union[str, Any] = self.get_scheduler_config()
A_ : Optional[int] = scheduler_class(**snake_case )
A_ : Union[str, Any] = [100, 87, 50, 1, 0]
A_ : Optional[int] = len(snake_case )
with self.assertRaises(snake_case , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=snake_case , timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Union[str, Any] = self.scheduler_classes[0]
A_ : Optional[Any] = self.get_scheduler_config()
A_ : Optional[int] = scheduler_class(**snake_case )
A_ : Optional[int] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
snake_case , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
scheduler.set_timesteps(timesteps=snake_case )
| 300
| 1
|
_lowerCAmelCase : Optional[Any] = 8.314_4598
def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
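# Worked example: for N2 (molar mass 0.028 kg/mol) at T = 300 K,
# v_rms = sqrt(3 * 8.3144598 * 300 / 0.028) ~= 517 m/s.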
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 0.028  # kg/mol for N2, matching the function's documented units
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 300
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[int] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowerCAmelCase : int = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int ) -> List[Any]:
for attribute in key.split("." ):
A_ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
A_ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
A_ : Tuple = hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
A_ : Optional[int] = value
elif weight_type == "weight_g":
A_ : Optional[int] = value
elif weight_type == "weight_v":
A_ : Any = value
elif weight_type == "bias":
A_ : str = value
else:
A_ : Any = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict ) -> List[str]:
A_ : Optional[Any] = []
A_ : Any = fairseq_model.state_dict()
A_ : Union[str, Any] = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
A_ : str = None
for name, value in fairseq_dict.items():
A_ : Tuple = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , )
A_ : Optional[Any] = True
elif name.split("." )[0] == "proj":
A_ : Dict = fairseq_model.proj
A_ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
A_ : int = True
if "*" in mapped_key:
A_ : Optional[Any] = name.split(_lowerCAmelCase )[0].split("." )[-2]
A_ : int = mapped_key.replace("*" , _lowerCAmelCase )
if "weight_g" in name:
A_ : List[Any] = "weight_g"
elif "weight_v" in name:
A_ : List[Any] = "weight_v"
elif "bias" in name:
A_ : Dict = "bias"
elif "weight" in name:
A_ : List[Any] = "weight"
else:
A_ : Dict = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"Unused weights: {unused_weights}" )
return proj_weight
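# Example of the renaming performed above (hypothetical key): a fairseq weight named
# "w2v_model.encoder.layers.3.self_attn.k_proj.weight" matches the "self_attn.k_proj" entry
# of MAPPING, the layer index "3" is substituted for "*", and the value is copied into the
# HF parameter "encoder.layers.3.attention.k_proj.weight" with weight_type "weight".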
def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ) -> str:
A_ : Any = full_name.split("conv_layers." )[-1]
A_ : Optional[int] = name.split("." )
A_ : Optional[Any] = int(items[0] )
A_ : Union[str, Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
A_ : List[Any] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
A_ : int = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
A_ : List[Any] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
A_ : Tuple = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_lowerCAmelCase )
def __snake_case ( _lowerCAmelCase : Optional[int] ) -> str:
A_ , A_ : List[str] = emb.weight.shape
A_ : Optional[int] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase )
A_ : List[Any] = emb.weight.data
return lin_layer
def __snake_case ( _lowerCAmelCase : str ) -> Tuple:
with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f:
A_ : int = f.readlines()
A_ : Dict = [line.split(" " )[0] for line in lines]
A_ : Tuple = len(_lowerCAmelCase )
A_ : Union[str, Any] = {
"<s>": 0,
"<pad>": 1,
"</s>": 2,
"<unk>": 3,
}
vocab_dict.update(dict(zip(_lowerCAmelCase , range(4 , num_words + 4 ) ) ) )
return vocab_dict
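# Illustrative input/output for the helper above (hypothetical two-line fairseq dict file):
#   dict.txt contents: "hello 104\nworld 42\n"
#   -> {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}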
@torch.no_grad()
def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , ) -> Tuple:
A_ : Optional[int] = WavaVecaConfig.from_pretrained(_lowerCAmelCase )
A_ : str = SpeechaTextaConfig.from_pretrained(
_lowerCAmelCase , vocab_size=_lowerCAmelCase , decoder_layers=_lowerCAmelCase , do_stable_layer_norm=_lowerCAmelCase )
A_ : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
A_ , A_ , A_ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
A_ : Union[str, Any] = model[0].eval()
# set weights for wav2vec2 encoder
A_ : Tuple = WavaVecaModel(_lowerCAmelCase )
A_ : str = recursively_load_weights_wavaveca(model.encoder , _lowerCAmelCase )
A_ : Tuple = SpeechaTextaForCausalLM(_lowerCAmelCase )
A_ , A_ : List[str] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowerCAmelCase )
# set output linear layer
unexpected_keys.remove("embed_out" )
A_ : Union[str, Any] = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
A_ : str = SpeechEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
A_ : Optional[Any] = False
# add projection layer
A_ : Optional[Any] = nn.Parameter(projection_layer.weight )
A_ : int = nn.Parameter(projection_layer.bias )
A_ : str = create_vocab_dict(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "vocab.json" ) , "w" ) as fp:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
A_ : Any = SpeechaTextaTokenizer(os.path.join(_lowerCAmelCase , "vocab.json" ) )
tokenizer.save_pretrained(_lowerCAmelCase )
A_ : Optional[int] = hf_wavavec.config.to_dict()
A_ : int = tokenizer.pad_token_id
A_ : List[str] = tokenizer.bos_token_id
A_ : List[str] = tokenizer.eos_token_id
A_ : List[str] = "speech_to_text_2"
A_ : Tuple = "wav2vec2"
A_ : str = SpeechEncoderDecoderConfig.from_dict(_lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
feature_extractor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=10_224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
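# Example invocation (script name and paths are placeholders, not real checkpoints):
# python convert_wav2vec2_seq2seq_checkpoint.py \
#     --checkpoint_path /path/to/fairseq_checkpoint.pt \
#     --dict_path /path/to/dict.txt \
#     --pytorch_dump_folder_path ./wav2vec2-s2t-converted \
#     --vocab_size 10224 \
#     --num_decoder_layers 7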
| 300
| 1
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : str = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
A_ : Dict = AutoTokenizer.from_pretrained("google/mt5-small" )
A_ : int = tokenizer("Hello there" , return_tensors="np" ).input_ids
A_ : Optional[int] = tokenizer("Hi I am" , return_tensors="np" ).input_ids
A_ : Optional[Any] = shift_tokens_right(snake_case , model.config.pad_token_id , model.config.decoder_start_token_id )
A_ : str = model(snake_case , decoder_input_ids=snake_case ).logits
A_ : Optional[int] = optax.softmax_cross_entropy(snake_case , onehot(snake_case , logits.shape[-1] ) ).mean()
A_ : Dict = -(labels.shape[-1] * loss.item())
A_ : Any = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 300
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class __magic_name__ :
"""simple docstring"""
def __init__( self :Tuple , snake_case :Optional[Any] , snake_case :Tuple=13 , snake_case :Dict=7 , snake_case :List[Any]=True , snake_case :List[Any]=True , snake_case :Dict=True , snake_case :Any=True , snake_case :Optional[int]=99 , snake_case :Any=32 , snake_case :Dict=2 , snake_case :int=4 , snake_case :Optional[int]=37 , snake_case :List[str]="gelu" , snake_case :List[Any]=0.1 , snake_case :Optional[Any]=0.1 , snake_case :Tuple=512 , snake_case :Tuple=16 , snake_case :Tuple=2 , snake_case :Optional[int]=0.02 , snake_case :str=3 , snake_case :Optional[int]=4 , snake_case :List[str]=None , snake_case :Tuple=1_000 , ):
'''simple docstring'''
A_ : str = parent
A_ : str = batch_size
A_ : str = seq_length
A_ : Any = is_training
A_ : Any = use_input_mask
A_ : str = use_token_type_ids
A_ : Tuple = use_labels
A_ : Optional[Any] = vocab_size
A_ : Dict = hidden_size
A_ : str = num_hidden_layers
A_ : Dict = num_attention_heads
A_ : str = intermediate_size
A_ : int = hidden_act
A_ : List[Any] = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : Optional[Any] = max_position_embeddings
A_ : List[Any] = type_vocab_size
A_ : Any = type_sequence_label_size
A_ : Dict = initializer_range
A_ : Any = num_labels
A_ : Optional[int] = num_choices
A_ : Optional[Any] = scope
A_ : Any = range_bbox
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
A_ : str = bbox[i, j, 3]
A_ : Union[str, Any] = bbox[i, j, 1]
A_ : List[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
A_ : Any = bbox[i, j, 2]
A_ : Tuple = bbox[i, j, 0]
A_ : int = t
A_ : int = tf.convert_to_tensor(snake_case )
A_ : Any = None
if self.use_input_mask:
A_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : str = None
if self.use_token_type_ids:
A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ : Dict = None
A_ : List[Any] = None
A_ : List[str] = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ : str = ids_tensor([self.batch_size] , self.num_choices )
A_ : int = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self :str , snake_case :Dict , snake_case :Union[str, Any] , snake_case :int , snake_case :int , snake_case :Union[str, Any] , snake_case :Tuple , snake_case :Optional[int] , snake_case :List[Any] ):
'''simple docstring'''
A_ : Any = TFLayoutLMModel(config=snake_case )
A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case )
A_ : str = model(snake_case , snake_case , token_type_ids=snake_case )
A_ : List[Any] = model(snake_case , snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Any , snake_case :List[Any] , snake_case :List[str] , snake_case :Optional[Any] , snake_case :Dict , snake_case :Any , snake_case :Union[str, Any] , snake_case :List[Any] ):
'''simple docstring'''
A_ : Optional[int] = TFLayoutLMForMaskedLM(config=snake_case )
A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :Dict , snake_case :Tuple , snake_case :Tuple , snake_case :List[str] , snake_case :Tuple , snake_case :str , snake_case :Optional[int] , snake_case :Any ):
'''simple docstring'''
A_ : Union[str, Any] = self.num_labels
A_ : int = TFLayoutLMForSequenceClassification(config=snake_case )
A_ : Optional[int] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict , snake_case :str , snake_case :Optional[Any] , snake_case :int , snake_case :Any , snake_case :Tuple , snake_case :List[str] , snake_case :Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = self.num_labels
A_ : str = TFLayoutLMForTokenClassification(config=snake_case )
A_ : Union[str, Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[str] , snake_case :Optional[int] , snake_case :Union[str, Any] , snake_case :List[Any] , snake_case :int , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ):
'''simple docstring'''
A_ : Optional[Any] = TFLayoutLMForQuestionAnswering(config=snake_case )
A_ : List[Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : int = self.prepare_config_and_inputs()
        A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ : Union[str, Any] = config_and_inputs
A_ : Optional[Any] = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
__UpperCamelCase = (
{
'''feature-extraction''': TFLayoutLMModel,
'''fill-mask''': TFLayoutLMForMaskedLM,
'''text-classification''': TFLayoutLMForSequenceClassification,
'''token-classification''': TFLayoutLMForTokenClassification,
'''zero-shot''': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = True
__UpperCamelCase = 10
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : Tuple = TFLayoutLMModelTester(self )
A_ : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
@slow
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[str] = TFLayoutLMModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@unittest.skip("Onnx compliancy broke with TF 2.10" )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
pass
def __snake_case ( ) -> Optional[Any]:
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
A_ : int = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231
A_ : int = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
A_ : Union[str, Any] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
A_ : List[Any] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
A_ : Tuple = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
A_ : str = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" )
A_ , A_ , A_ , A_ , A_ : Tuple = prepare_layoutlm_batch_inputs()
# forward pass
A_ : Tuple = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case )
# test the sequence output on [0, :3, :3]
A_ : List[Any] = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case , atol=1e-3 ) )
# test the pooled output on [1, :3]
A_ : Optional[Any] = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , snake_case , atol=1e-3 ) )
@slow
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Union[str, Any] = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 )
A_ , A_ , A_ , A_ , A_ : Any = prepare_layoutlm_batch_inputs()
# forward pass
A_ : Dict = model(
input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=tf.convert_to_tensor([1, 1] ) , )
        # test that we get a loss of the expected shape (one value per example)
A_ : List[str] = outputs.loss
A_ : Union[str, Any] = (2,)
self.assertEqual(loss.shape , snake_case )
# test the shape of the logits
A_ : Tuple = outputs.logits
A_ : Tuple = (2, 2)
self.assertEqual(logits.shape , snake_case )
@slow
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : int = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 )
A_ , A_ , A_ , A_ , A_ : Optional[int] = prepare_layoutlm_batch_inputs()
# forward pass
A_ : Union[str, Any] = model(
input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
# test the shape of the logits
A_ : Dict = outputs.logits
A_ : List[Any] = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , snake_case )
@slow
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Optional[Any] = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" )
A_ , A_ , A_ , A_ , A_ : str = prepare_layoutlm_batch_inputs()
# forward pass
A_ : Union[str, Any] = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case )
# test the shape of the logits
A_ : Union[str, Any] = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , snake_case )
self.assertEqual(outputs.end_logits.shape , snake_case )
| 300
| 1
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
@dataclass
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self :Tuple , **snake_case :List[str] ):
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
A_ : Any = deprecated_arg[3:]
setattr(self , snake_case , not kwargs.pop(snake_case ) )
logger.warning(
f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
f" {positive_arg}={kwargs[positive_arg]}" )
A_ : Any = kwargs.pop("torchscript" , self.torchscript )
A_ : str = kwargs.pop("torch_xla_tpu_print_metrics" , self.torch_xla_tpu_print_metrics )
A_ : Tuple = kwargs.pop("fp16_opt_level" , self.fpaa_opt_level )
super().__init__(**snake_case )
__UpperCamelCase = field(default=lowerCamelCase__ , metadata={'''help''': '''Trace the models using torchscript'''} )
__UpperCamelCase = field(default=lowerCamelCase__ , metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''} )
__UpperCamelCase = field(
default='''O1''' , metadata={
'''help''': (
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '''
'''See details at https://nvidia.github.io/apex/amp.html'''
)
} , )
@cached_property
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
logger.info("PyTorch: setting up devices" )
if not self.cuda:
A_ : Optional[Any] = torch.device("cpu" )
A_ : Dict = 0
elif is_torch_tpu_available():
A_ : str = xm.xla_device()
A_ : Optional[int] = 0
else:
A_ : List[Any] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
A_ : int = torch.cuda.device_count()
return device, n_gpu
@property
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
return is_torch_tpu_available() and self.tpu
@property
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
requires_backends(self , ["torch"] )
return self._setup_devices[0]
@property
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
return self._setup_devices[1]
@property
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
return self.n_gpu > 0
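# Device resolution implemented above, summarized: disabling CUDA forces CPU with
# n_gpu = 0; otherwise an XLA/TPU device is preferred when available; finally a
# CUDA device is used with torch.cuda.device_count() GPUs, falling back to CPU.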
| 300
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
_lowerCAmelCase : Optional[int] = '''
Human: <<task>>
Assistant: '''
_lowerCAmelCase : int = '''huggingface-tools/default-prompts'''
_lowerCAmelCase : Any = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict="run" ) -> List[Any]:
if prompt_or_repo_id is None:
A_ : Optional[int] = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , _lowerCAmelCase ) is not None:
return prompt_or_repo_id
A_ : Optional[Any] = cached_file(
_lowerCAmelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f:
return f.read()
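# Behavior sketch (hypothetical inputs): a string containing any whitespace is
# treated as a literal prompt and returned unchanged, while a bare identifier
# such as "my-user/my-prompts" is resolved as a dataset repo and the file named
# in PROMPT_FILES[mode] is downloaded and read.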
| 300
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Optional[int] = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[str] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
_lowerCAmelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 300
|
def __snake_case ( _lowerCAmelCase : list ) -> list:
if len(_lowerCAmelCase ) <= 1:
return [tuple(_lowerCAmelCase )]
A_ : Tuple = []
def generate(_lowerCAmelCase : int , _lowerCAmelCase : list ):
A_ : List[str] = [0] * n
res.append(tuple(_lowerCAmelCase ) )
A_ : int = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
A_ , A_ : str = arr[i], arr[0]
else:
A_ , A_ : List[str] = arr[i], arr[c[i]]
res.append(tuple(_lowerCAmelCase ) )
c[i] += 1
A_ : Tuple = 0
else:
A_ : Dict = 0
i += 1
generate(len(_lowerCAmelCase ) , _lowerCAmelCase )
return res
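# Sanity check for the iterative Heap's algorithm above:
# heaps([1, 2, 3]) returns
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)],
# i.e. all 3! = 6 permutations, each obtained from the previous by a single swap.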
if __name__ == "__main__":
_lowerCAmelCase : str = input('''Enter numbers separated by a comma:\n''').strip()
_lowerCAmelCase : str = [int(item) for item in user_input.split(''',''')]
print(heaps(arr))
| 300
| 1
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowerCAmelCase : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_lowerCAmelCase : List[Any] = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : str = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , "models/bert/" ) )
A_ : Tuple = self.transformer_dir
shutil.copy(
os.path.join(snake_case , "src/transformers/models/bert/modeling_bert.py" ) , os.path.join(self.transformer_dir , "models/bert/modeling_bert.py" ) , )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Optional[int] = "src/transformers"
shutil.rmtree(self.transformer_dir )
def SCREAMING_SNAKE_CASE ( self :str , snake_case :Optional[Any] , snake_case :Dict , snake_case :Optional[int] , snake_case :Tuple=None ):
'''simple docstring'''
A_ : int = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
A_ : List[Any] = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
A_ : Any = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
A_ : Dict = black.format_str(snake_case , mode=snake_case )
A_ : str = os.path.join(self.transformer_dir , "new_code.py" )
with open(snake_case , "w" , newline="\n" ) as f:
f.write(snake_case )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(snake_case ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=snake_case )
with open(snake_case , "r" ) as f:
self.assertTrue(f.read() , snake_case )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : Optional[Any] = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead" )
self.assertEqual(snake_case , snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , snake_case , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , re.sub("Bert" , "TestModel" , snake_case ) , )
# Copy consistency with a really long name
A_ : str = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , f"{long_class_name}LMPredictionHead" , re.sub("Bert" , snake_case , snake_case ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , snake_case , overwrite_result=re.sub("Bert" , "TestModel" , snake_case ) , )
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
A_ : Any = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
A_ : Any = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
A_ : Union[str, Any] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
A_ , A_ : Optional[int] = check_copies.convert_to_localized_md(
snake_case , snake_case , localized_readme["format_model_list"] )
self.assertFalse(snake_case )
self.assertEqual(snake_case , snake_case )
A_ , A_ : List[Any] = check_copies.convert_to_localized_md(
snake_case , snake_case , localized_readme["format_model_list"] )
        # Check whether the number of models matches the count in README.md after conversion.
self.assertTrue(snake_case )
A_ : Dict = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
A_ : int = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
A_ : List[Any] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
A_ , A_ : List[str] = check_copies.convert_to_localized_md(
snake_case , snake_case , localized_readme["format_model_list"] )
# Check if the model link is synchronized.
self.assertEqual(snake_case , snake_case )
| 300
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
_lowerCAmelCase : int = logging.get_logger(__name__)
_lowerCAmelCase : Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCAmelCase : List[Any] = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
_lowerCAmelCase : Any = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
__UpperCamelCase = RobertaTokenizer
def __init__( self :Dict , snake_case :List[str]=None , snake_case :List[Any]=None , snake_case :Union[str, Any]=None , snake_case :List[str]="replace" , snake_case :Tuple="<s>" , snake_case :Union[str, Any]="</s>" , snake_case :str="</s>" , snake_case :Union[str, Any]="<s>" , snake_case :int="<unk>" , snake_case :Tuple="<pad>" , snake_case :List[str]="<mask>" , snake_case :Any=False , snake_case :Union[str, Any]=True , **snake_case :Optional[int] , ):
'''simple docstring'''
super().__init__(
snake_case , snake_case , tokenizer_file=snake_case , errors=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , **snake_case , )
A_ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case ) != add_prefix_space:
A_ : Dict = getattr(snake_case , pre_tok_state.pop("type" ) )
A_ : Optional[int] = add_prefix_space
A_ : int = pre_tok_class(**snake_case )
A_ : Optional[int] = add_prefix_space
A_ : Optional[int] = "post_processor"
A_ : Dict = getattr(self.backend_tokenizer , snake_case , snake_case )
if tokenizer_component_instance:
A_ : Dict = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
A_ : List[Any] = tuple(state["sep"] )
if "cls" in state:
A_ : Optional[Any] = tuple(state["cls"] )
A_ : Tuple = False
if state.get("add_prefix_space" , snake_case ) != add_prefix_space:
A_ : List[Any] = add_prefix_space
A_ : Optional[int] = True
if state.get("trim_offsets" , snake_case ) != trim_offsets:
A_ : List[str] = trim_offsets
A_ : Any = True
if changes_to_apply:
A_ : Optional[Any] = getattr(snake_case , state.pop("type" ) )
A_ : Any = component_class(**snake_case )
setattr(self.backend_tokenizer , snake_case , snake_case )
@property
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :Dict ):
'''simple docstring'''
A_ : Dict = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else value
A_ : Any = value
def SCREAMING_SNAKE_CASE ( self :Dict , *snake_case :Tuple , **snake_case :Union[str, Any] ):
'''simple docstring'''
A_ : Any = kwargs.get("is_split_into_words" , snake_case )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE ( self :List[str] , *snake_case :str , **snake_case :Union[str, Any] ):
'''simple docstring'''
A_ : Any = kwargs.get("is_split_into_words" , snake_case )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :str , snake_case :Optional[str] = None ):
'''simple docstring'''
A_ : str = self._tokenizer.model.save(snake_case , name=snake_case )
return tuple(snake_case )
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :List[str] , snake_case :Optional[Any]=None ):
'''simple docstring'''
A_ : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :List[int] , snake_case :Optional[List[int]] = None ):
'''simple docstring'''
A_ : Any = [self.sep_token_id]
A_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
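# Layout of the special tokens produced above (standard RoBERTa convention):
# single sequence:   <s> A </s>
# pair of sequences: <s> A </s> </s> B </s>
# with token_type_ids of all zeros in both cases.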
| 300
| 1
|
def __snake_case ( _lowerCAmelCase : int ) -> list:
A_ : Union[str, Any] = int(_lowerCAmelCase )
if n_element < 1:
        A_ : Dict = ValueError("n_element should be a positive number" )
raise my_error
A_ : Union[str, Any] = [1]
A_ , A_ , A_ : str = (0, 0, 0)
A_ : Dict = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
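# Worked example: hamming(5) returns [1, 2, 3, 4, 5]. Every entry has the form
# 2^i * 3^j * 5^k; the pointers i, j, k each track the smallest list element
# whose multiple by 2, 3 or 5 respectively has not yet been appended.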
if __name__ == "__main__":
_lowerCAmelCase : List[str] = input('''Enter the last number (nth term) of the Hamming Number Series: ''')
print('''Formula of Hamming Number Series => 2^i * 3^j * 5^k''')
_lowerCAmelCase : List[Any] = hamming(int(n))
print('''-----------------------------------------------------''')
print(F'''The list with nth numbers is: {hamming_numbers}''')
print('''-----------------------------------------------------''')
| 300
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCAmelCase : int = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_lowerCAmelCase : Tuple = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_lowerCAmelCase : int = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[List[List[str]]] , snake_case :List[List[str]] , snake_case :int = 1 , snake_case :int = 4 , ):
'''simple docstring'''
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=snake_case , hypotheses=snake_case , min_len=snake_case , max_len=snake_case )
}
| 300
| 1
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
def __snake_case ( _lowerCAmelCase : Union[str, Any] ) -> Tuple:
A_ : Optional[int] = torch.load(_lowerCAmelCase , map_location="cpu" )
if "model" in sd.keys():
A_ : List[str] = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
# pop unnecessary weights
A_ : str = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowerCAmelCase )
A_ : Optional[int] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
A_ : Any = sd.pop(_lowerCAmelCase )
A_ : Any = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
A_ : Union[str, Any] = sd[key]
# We split QKV in separate Q,K,V
A_ : Optional[Any] = key.replace(".qkv_proj." , ".q_proj." )
A_ : int = key.replace(".qkv_proj." , ".k_proj." )
A_ : Optional[Any] = key.replace(".qkv_proj." , ".v_proj." )
A_ : str = value.shape[0]
assert depth % 3 == 0
        # `SequeuceParallelTransformerBlock` stores its fused QKV weight separated as K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
A_ , A_ , A_ : Dict = torch.split(_lowerCAmelCase , depth // 3 , dim=0 )
A_ : List[Any] = q
A_ : Dict = k
A_ : Tuple = v
del sd[key]
return sd
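# Shape sketch for the split above (hypothetical sizes): a fused qkv_proj weight
# of shape (3 * hidden, hidden) is torch.split along dim 0 into three
# (hidden, hidden) chunks assigned to the q/k/v projections; per the metaseq
# comment above, the fused storage order is actually K, V, Q despite the names.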
@torch.no_grad()
def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str]=None ) -> List[Any]:
A_ : Any = load_checkpoint(_lowerCAmelCase )
if config is not None:
A_ : Tuple = OPTConfig.from_pretrained(_lowerCAmelCase )
else:
A_ : Any = OPTConfig()
A_ : int = OPTModel(_lowerCAmelCase ).half().eval()
model.load_state_dict(_lowerCAmelCase )
# Check results
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
_lowerCAmelCase : List[str] = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
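# Example invocation (placeholder paths; script name is illustrative):
# python convert_opt_checkpoint.py \
#     --fairseq_path /path/to/restored.pt \
#     --pytorch_dump_folder_path ./opt-converted \
#     --hf_config facebook/opt-350m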
| 300
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ) -> str:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] ) -> Optional[int]:
A_ : Tuple = tmp_path / "cache"
A_ : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : Optional[Any] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ) -> str:
A_ : List[Any] = tmp_path / "cache"
A_ : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : int = features.copy() if features else default_expected_features
A_ : str = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ : Union[str, Any] = ParquetDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any ) -> Optional[Any]:
A_ : Dict = tmp_path / "cache"
A_ : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : Optional[int] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , split=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] ) -> List[str]:
if issubclass(_lowerCAmelCase , _lowerCAmelCase ):
A_ : int = parquet_path
elif issubclass(_lowerCAmelCase , _lowerCAmelCase ):
A_ : Optional[int] = [parquet_path]
A_ : Optional[int] = tmp_path / "cache"
A_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : Optional[int] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]=("train",) ) -> Tuple:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
for split in splits:
A_ : List[str] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict ) -> Optional[int]:
A_ : Optional[Any] = tmp_path / "cache"
A_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : Union[str, Any] = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __snake_case ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : str ) -> Tuple:
A_ : Optional[Any] = tmp_path / "cache"
A_ : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : List[str] = features.copy() if features else default_expected_features
A_ : Tuple = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ : Optional[int] = ParquetDatasetReader({"train": parquet_path} , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Any ) -> Union[str, Any]:
if split:
A_ : Any = {split: parquet_path}
else:
A_ : Optional[Any] = "train"
A_ : str = {"train": parquet_path, "test": parquet_path}
A_ : Any = tmp_path / "cache"
A_ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : Dict = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] ) -> Dict:
A_ : List[str] = ParquetDatasetWriter(_lowerCAmelCase , tmp_path / "foo.parquet" )
assert writer.write() > 0
A_ : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" )
A_ : Dict = pf.read()
assert dataset.data.table == output_table
def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ) -> List[Any]:
A_ : Tuple = str(shared_datadir / "test_image_rgb.jpg" )
A_ : int = {"image": [image_path]}
A_ : Optional[Any] = Features({"image": Image()} )
A_ : Union[str, Any] = Dataset.from_dict(_lowerCAmelCase , features=_lowerCAmelCase )
A_ : Tuple = ParquetDatasetWriter(_lowerCAmelCase , tmp_path / "foo.parquet" )
assert writer.write() > 0
A_ : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
A_ : int = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=_lowerCAmelCase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ) -> Any:
assert get_writer_batch_size(_lowerCAmelCase ) == expected
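# A minimal round-trip sketch of the writer/reader pair exercised above. The
# column values are illustrative; `tmp_dir` is any writable directory.
from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

def parquet_roundtrip_example(tmp_dir: str) -> None:
    ds = Dataset.from_dict(
        {"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4], "col_3": [1.0, 2.0, 3.0, 4.0]}
    )
    path = f"{tmp_dir}/example.parquet"
    assert ParquetDatasetWriter(ds, path).write() > 0  # the tests above only assert this is positive
    reloaded = ParquetDatasetReader(path).read()       # returns a Dataset with the same schema
    assert reloaded.column_names == ["col_1", "col_2", "col_3"]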
| 300
| 1
|
_lowerCAmelCase : Tuple = '''Input must be a string of 8 digits plus a letter'''
_lowerCAmelCase : int = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def __snake_case ( _lowerCAmelCase : str ) -> bool:
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A_ : int = f"Expected string as input, found {type(_lowerCAmelCase ).__name__}"
raise TypeError(_lowerCAmelCase )
A_ : Optional[Any] = spanish_id.replace("-" , "" ).upper()
if len(_lowerCAmelCase ) != 9:
raise ValueError(_lowerCAmelCase )
try:
A_ : Optional[int] = int(spanish_id_clean[0:8] )
A_ : Optional[Any] = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(_lowerCAmelCase ) from ex
if letter.isdigit():
raise ValueError(_lowerCAmelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
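# Example calls against the validator above (12345678 % 23 == 14, and position
# 14 of LOOKUP_LETTERS is 'Z'); the IDs are the usual documentation examples,
# not real ones:
#
#   validator("12345678Z")   -> True
#   validator("12345678-Z")  -> True   (the hyphen is stripped)
#   validator("12345678A")   -> False  (wrong control letter)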
| 300
|
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any]="shi-labs/oneformer_demo" ) -> int:
with open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) as f:
A_ : Optional[int] = json.load(_lowerCAmelCase )
A_ : Union[str, Any] = {}
A_ : Tuple = []
A_ : Optional[Any] = []
for key, info in class_info.items():
A_ : Tuple = info["name"]
class_names.append(info["name"] )
if info["isthing"]:
thing_ids.append(int(_lowerCAmelCase ) )
A_ : Optional[Any] = thing_ids
A_ : int = class_names
return metadata
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self :List[Any] , snake_case :List[str] , snake_case :int=7 , snake_case :Optional[int]=3 , snake_case :Union[str, Any]=30 , snake_case :Tuple=400 , snake_case :List[Any]=None , snake_case :Optional[Any]=True , snake_case :Tuple=True , snake_case :Dict=[0.5, 0.5, 0.5] , snake_case :Any=[0.5, 0.5, 0.5] , snake_case :Optional[int]=10 , snake_case :Tuple=False , snake_case :Optional[int]=255 , snake_case :Optional[Any]="shi-labs/oneformer_demo" , snake_case :Optional[Any]="ade20k_panoptic.json" , snake_case :Optional[int]=10 , ):
'''simple docstring'''
A_ : Tuple = parent
A_ : List[str] = batch_size
A_ : Optional[int] = num_channels
A_ : Tuple = min_resolution
A_ : List[Any] = max_resolution
A_ : Union[str, Any] = do_resize
A_ : Any = {"shortest_edge": 32, "longest_edge": 1_333} if size is None else size
A_ : Tuple = do_normalize
A_ : List[str] = image_mean
A_ : List[Any] = image_std
A_ : Union[str, Any] = class_info_file
A_ : List[Any] = prepare_metadata(snake_case , snake_case )
A_ : Tuple = num_text
A_ : str = repo_path
# for the post_process_functions
A_ : Any = 2
A_ : int = 10
A_ : Optional[int] = 10
A_ : Tuple = 3
A_ : Tuple = 4
A_ : str = num_labels
A_ : int = do_reduce_labels
A_ : List[Any] = ignore_index
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Any , snake_case :Any=False ):
'''simple docstring'''
if not batched:
A_ : List[str] = image_inputs[0]
if isinstance(snake_case , Image.Image ):
A_ , A_ : Dict = image.size
else:
A_ , A_ : Tuple = image.shape[1], image.shape[2]
if w < h:
A_ : str = int(self.size["shortest_edge"] * h / w )
A_ : Any = self.size["shortest_edge"]
elif w > h:
A_ : Optional[int] = self.size["shortest_edge"]
A_ : List[str] = int(self.size["shortest_edge"] * w / h )
else:
A_ : List[str] = self.size["shortest_edge"]
A_ : Optional[Any] = self.size["shortest_edge"]
else:
A_ : Tuple = []
for image in image_inputs:
A_ , A_ : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A_ : Tuple = max(snake_case , key=lambda snake_case : item[0] )[0]
A_ : Union[str, Any] = max(snake_case , key=lambda snake_case : item[1] )[1]
return expected_height, expected_width
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class __magic_name__ ( lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
__UpperCamelCase = image_processing_class
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : Union[str, Any] = OneFormerImageProcessorTester(self )
@property
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case , "image_mean" ) )
self.assertTrue(hasattr(snake_case , "image_std" ) )
self.assertTrue(hasattr(snake_case , "do_normalize" ) )
self.assertTrue(hasattr(snake_case , "do_resize" ) )
self.assertTrue(hasattr(snake_case , "size" ) )
self.assertTrue(hasattr(snake_case , "ignore_index" ) )
self.assertTrue(hasattr(snake_case , "class_info_file" ) )
self.assertTrue(hasattr(snake_case , "num_text" ) )
self.assertTrue(hasattr(snake_case , "repo_path" ) )
self.assertTrue(hasattr(snake_case , "metadata" ) )
self.assertTrue(hasattr(snake_case , "do_reduce_labels" ) )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , Image.Image )
# Test not batched input
A_ : str = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
A_ , A_ : str = self.image_processing_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ , A_ : Optional[Any] = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case )
A_ : List[str] = image_processor(
snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case , numpify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , np.ndarray )
# Test not batched input
A_ : List[str] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
A_ , A_ : List[str] = self.image_processing_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ , A_ : int = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case )
A_ : Optional[Any] = image_processor(
snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case , torchify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , torch.Tensor )
# Test not batched input
A_ : Any = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
A_ , A_ : Tuple = self.image_processing_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ , A_ : Tuple = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case )
A_ : Any = image_processor(
snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict=False , snake_case :str=False , snake_case :Dict="np" ):
'''simple docstring'''
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
A_ : Tuple = self.image_processing_tester.num_labels
A_ : str = None
A_ : Tuple = None
A_ : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case )
if with_segmentation_maps:
A_ : List[str] = num_labels
if is_instance_map:
A_ : List[str] = list(range(snake_case ) ) * 2
A_ : int = dict(enumerate(snake_case ) )
A_ : List[str] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
A_ : int = [Image.fromarray(snake_case ) for annotation in annotations]
A_ : List[str] = image_processor(
snake_case , ["semantic"] * len(snake_case ) , snake_case , return_tensors="pt" , instance_id_to_semantic_id=snake_case , pad_and_return_pixel_mask=snake_case , )
return inputs
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
def common(snake_case :Dict=False , snake_case :Optional[int]=None ):
A_ : Tuple = self.comm_get_image_processor_inputs(
with_segmentation_maps=snake_case , is_instance_map=snake_case , segmentation_type=snake_case )
A_ : Optional[Any] = inputs["mask_labels"]
A_ : List[Any] = inputs["class_labels"]
A_ : Optional[Any] = inputs["pixel_values"]
A_ : int = inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(snake_case , snake_case , snake_case ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensures padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(snake_case ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=snake_case )
common(is_instance_map=snake_case , segmentation_type="pil" )
common(is_instance_map=snake_case , segmentation_type="pil" )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Any = np.zeros((20, 50) )
A_ : List[str] = 1
A_ : int = 1
A_ : Optional[Any] = 1
A_ : Any = binary_mask_to_rle(snake_case )
self.assertEqual(len(snake_case ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Union[str, Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
A_ : Any = self.image_processing_tester.get_fake_oneformer_outputs()
        A_ : int = image_processor.post_process_semantic_segmentation(snake_case )
self.assertEqual(len(snake_case ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
A_ : Optional[int] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        A_ : List[Any] = image_processor.post_process_semantic_segmentation(snake_case , target_sizes=snake_case )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : List[str] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
A_ : str = self.image_processing_tester.get_fake_oneformer_outputs()
A_ : Optional[Any] = image_processor.post_process_instance_segmentation(snake_case , threshold=0 )
self.assertTrue(len(snake_case ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , snake_case )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Tuple = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
A_ : List[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
A_ : Optional[Any] = image_processor.post_process_panoptic_segmentation(snake_case , threshold=0 )
self.assertTrue(len(snake_case ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , snake_case )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 300
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCAmelCase : Union[str, Any] = {
'''configuration_convnext''': ['''CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvNextConfig''', '''ConvNextOnnxConfig''']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Union[str, Any] = ['''ConvNextFeatureExtractor''']
_lowerCAmelCase : Optional[int] = ['''ConvNextImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Any = [
'''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvNextForImageClassification''',
'''ConvNextModel''',
'''ConvNextPreTrainedModel''',
'''ConvNextBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[Any] = [
'''TFConvNextForImageClassification''',
'''TFConvNextModel''',
'''TFConvNextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
_lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
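# What the lazy structure above buys importers: `import transformers` stays
# cheap, and the torch/TF submodules load only on first attribute access.
# A minimal sketch (requires the vision extra to be installed):
from transformers import ConvNextConfig  # resolved lazily through _LazyModule

config = ConvNextConfig()
print(config.model_type)  # "convnext"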
| 300
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = '''data2vec-vision'''
def __init__( self :int , snake_case :Optional[int]=768 , snake_case :Any=12 , snake_case :Any=12 , snake_case :Tuple=3_072 , snake_case :Any="gelu" , snake_case :Tuple=0.0 , snake_case :int=0.0 , snake_case :Any=0.02 , snake_case :str=1e-12 , snake_case :List[str]=224 , snake_case :Dict=16 , snake_case :int=3 , snake_case :int=False , snake_case :str=False , snake_case :List[Any]=False , snake_case :Optional[Any]=False , snake_case :Tuple=0.1 , snake_case :Optional[Any]=0.1 , snake_case :Any=True , snake_case :Optional[Any]=[3, 5, 7, 11] , snake_case :Dict=[1, 2, 3, 6] , snake_case :int=True , snake_case :List[Any]=0.4 , snake_case :Any=256 , snake_case :Union[str, Any]=1 , snake_case :Union[str, Any]=False , snake_case :Any=255 , **snake_case :int , ):
'''simple docstring'''
super().__init__(**snake_case )
A_ : Dict = hidden_size
A_ : Tuple = num_hidden_layers
A_ : List[str] = num_attention_heads
A_ : Any = intermediate_size
A_ : Optional[Any] = hidden_act
A_ : Any = hidden_dropout_prob
A_ : List[str] = attention_probs_dropout_prob
A_ : Optional[Any] = initializer_range
A_ : List[str] = layer_norm_eps
A_ : str = image_size
A_ : Optional[int] = patch_size
A_ : int = num_channels
A_ : Optional[Any] = use_mask_token
A_ : Optional[Any] = use_absolute_position_embeddings
A_ : Optional[int] = use_relative_position_bias
A_ : Dict = use_shared_relative_position_bias
A_ : Any = layer_scale_init_value
A_ : Optional[Any] = drop_path_rate
A_ : Dict = use_mean_pooling
# decode head attributes (semantic segmentation)
A_ : Tuple = out_indices
A_ : Optional[Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
A_ : str = use_auxiliary_head
A_ : List[Any] = auxiliary_loss_weight
A_ : List[str] = auxiliary_channels
A_ : Dict = auxiliary_num_convs
A_ : List[str] = auxiliary_concat_input
A_ : Optional[int] = semantic_loss_ignore_index
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
return 1e-4
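# A quick sketch instantiating the config above with its defaults and reading
# back a few of the vision-specific attributes set in __init__.
from transformers import Data2VecVisionConfig

config = Data2VecVisionConfig()
print(config.model_type)                     # "data2vec-vision"
print(config.image_size, config.patch_size)  # 224 16
print(config.out_indices)                    # [3, 5, 7, 11]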
| 300
| 1
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_lowerCAmelCase : int = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__UpperCamelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__UpperCamelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__UpperCamelCase = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
A_ : Optional[int] = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" )
A_ : Any = text_classifier("This is great !" )
self.assertEqual(nested_simplify(snake_case ) , [{"label": "LABEL_0", "score": 0.504}] )
A_ : Tuple = text_classifier("This is great !" , top_k=2 )
self.assertEqual(
nested_simplify(snake_case ) , [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] )
A_ : str = text_classifier(["This is great !", "This is bad"] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case ) , [
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
] , )
A_ : str = text_classifier("This is great !" , top_k=1 )
self.assertEqual(nested_simplify(snake_case ) , [{"label": "LABEL_0", "score": 0.504}] )
# Legacy behavior
A_ : Optional[int] = text_classifier("This is great !" , return_all_scores=snake_case )
self.assertEqual(nested_simplify(snake_case ) , [{"label": "LABEL_0", "score": 0.504}] )
A_ : Dict = text_classifier("This is great !" , return_all_scores=snake_case )
self.assertEqual(
nested_simplify(snake_case ) , [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] )
A_ : List[str] = text_classifier(["This is great !", "Something else"] , return_all_scores=snake_case )
self.assertEqual(
nested_simplify(snake_case ) , [
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
] , )
A_ : int = text_classifier(["This is great !", "Something else"] , return_all_scores=snake_case )
self.assertEqual(
nested_simplify(snake_case ) , [
{"label": "LABEL_0", "score": 0.504},
{"label": "LABEL_0", "score": 0.504},
] , )
@require_torch
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
import torch
A_ : str = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , )
A_ : Optional[int] = text_classifier("This is great !" )
self.assertEqual(nested_simplify(snake_case ) , [{"label": "LABEL_0", "score": 0.504}] )
@require_tf
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Optional[Any] = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" )
A_ : Tuple = text_classifier("This is great !" )
self.assertEqual(nested_simplify(snake_case ) , [{"label": "LABEL_0", "score": 0.504}] )
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : Optional[Any] = pipeline("text-classification" )
A_ : Dict = text_classifier("This is great !" )
self.assertEqual(nested_simplify(snake_case ) , [{"label": "POSITIVE", "score": 1.0}] )
A_ : str = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(snake_case ) , [{"label": "NEGATIVE", "score": 1.0}] )
A_ : int = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(snake_case ) , [{"label": "POSITIVE", "score": 0.988}] )
@slow
@require_tf
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : str = pipeline("text-classification" , framework="tf" )
A_ : Any = text_classifier("This is great !" )
self.assertEqual(nested_simplify(snake_case ) , [{"label": "POSITIVE", "score": 1.0}] )
A_ : Optional[int] = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(snake_case ) , [{"label": "NEGATIVE", "score": 1.0}] )
A_ : Optional[int] = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(snake_case ) , [{"label": "POSITIVE", "score": 0.988}] )
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :int , snake_case :int , snake_case :Optional[Any] ):
'''simple docstring'''
A_ : str = TextClassificationPipeline(model=snake_case , tokenizer=snake_case )
return text_classifier, ["HuggingFace is in", "This is another test"]
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict , snake_case :Optional[int] ):
'''simple docstring'''
A_ : Optional[int] = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
A_ : Dict = "HuggingFace is in"
A_ : Tuple = text_classifier(snake_case )
self.assertEqual(nested_simplify(snake_case ) , [{"label": ANY(snake_case ), "score": ANY(snake_case )}] )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
A_ : Optional[Any] = ["HuggingFace is in ", "Paris is in France"]
A_ : Optional[int] = text_classifier(snake_case )
self.assertEqual(
nested_simplify(snake_case ) , [{"label": ANY(snake_case ), "score": ANY(snake_case )}, {"label": ANY(snake_case ), "score": ANY(snake_case )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
A_ : Tuple = text_classifier(snake_case , top_k=snake_case )
A_ : int = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(snake_case ) , [[{"label": ANY(snake_case ), "score": ANY(snake_case )}] * N, [{"label": ANY(snake_case ), "score": ANY(snake_case )}] * N] , )
A_ : int = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
A_ : Dict = text_classifier(snake_case )
self.assertEqual(
nested_simplify(snake_case ) , {"label": ANY(snake_case ), "score": ANY(snake_case )} , )
self.assertTrue(outputs["label"] in model.config.idalabel.values() )
        # This might be used as a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
A_ : Any = [["HuggingFace is in ", "Paris is in France"]]
with self.assertRaises(snake_case ):
text_classifier(snake_case )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
A_ : List[str] = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] )
self.assertEqual(
nested_simplify(snake_case ) , [{"label": ANY(snake_case ), "score": ANY(snake_case )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
| 300
|
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = ['''input_features''', '''attention_mask''']
def __init__( self :int , snake_case :int=80 , snake_case :Optional[int]=16_000 , snake_case :Tuple=0.0 , snake_case :Optional[int]=10 , snake_case :Optional[Any]=25 , snake_case :Dict="hamming_window" , snake_case :Tuple=32768.0 , snake_case :str=0.97 , snake_case :List[str]=1.0 , snake_case :Dict=True , snake_case :str=True , snake_case :Optional[Any]=False , **snake_case :Union[str, Any] , ):
'''simple docstring'''
super().__init__(feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , **snake_case )
A_ : Union[str, Any] = feature_size
A_ : int = sampling_rate
A_ : str = padding_value
A_ : int = hop_length
A_ : List[str] = win_length
A_ : Any = frame_signal_scale
A_ : str = preemphasis_coeff
A_ : List[str] = mel_floor
A_ : str = normalize_means
A_ : Any = normalize_vars
A_ : Optional[Any] = win_function
A_ : Dict = return_attention_mask
A_ : List[str] = win_length * sampling_rate // 1_000
A_ : List[str] = hop_length * sampling_rate // 1_000
A_ : List[str] = optimal_fft_length(self.sample_size )
A_ : str = (self.n_fft // 2) + 1
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :np.array ):
'''simple docstring'''
if self.win_function == "hamming_window":
A_ : Dict = window_function(window_length=self.sample_size , name=self.win_function , periodic=snake_case )
else:
A_ : List[str] = window_function(window_length=self.sample_size , name=self.win_function )
A_ : Optional[int] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
A_ : Tuple = spectrogram(
one_waveform * self.frame_signal_scale , window=snake_case , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=snake_case , preemphasis=self.preemphasis_coeff , mel_filters=snake_case , mel_floor=self.mel_floor , log_mel="log" , )
return msfc_features.T
def SCREAMING_SNAKE_CASE ( self :int , snake_case :Any , snake_case :Union[str, Any] , snake_case :str ):
'''simple docstring'''
if self.normalize_means:
A_ : int = x[:input_length].mean(axis=0 )
A_ : Any = np.subtract(snake_case , snake_case )
if self.normalize_vars:
A_ : List[Any] = x[:input_length].std(axis=0 )
A_ : Optional[int] = np.divide(snake_case , snake_case )
if input_length < x.shape[0]:
A_ : Optional[int] = padding_value
# make sure array is in float32
A_ : Union[str, Any] = x.astype(np.floataa )
return x
def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[np.ndarray] , snake_case :Optional[np.ndarray] = None ):
'''simple docstring'''
A_ : str = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(snake_case , snake_case , self.padding_value ) for x, n in zip(snake_case , snake_case )]
def __call__( self :int , snake_case :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , snake_case :Union[bool, str, PaddingStrategy] = False , snake_case :Optional[int] = None , snake_case :bool = False , snake_case :Optional[int] = None , snake_case :Optional[bool] = None , snake_case :Optional[Union[str, TensorType]] = None , snake_case :Optional[int] = None , **snake_case :Dict , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
A_ : Optional[int] = isinstance(snake_case , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
A_ : Optional[Any] = is_batched_numpy or (
isinstance(snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A_ : List[Any] = [np.asarray(snake_case , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(snake_case , np.ndarray ):
A_ : int = np.asarray(snake_case , dtype=np.floataa )
elif isinstance(snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A_ : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A_ : Tuple = [raw_speech]
# extract fbank features
A_ : int = [self._extract_mfsc_features(snake_case ) for one_waveform in raw_speech]
# convert into correct format for padding
A_ : Union[str, Any] = BatchFeature({"input_features": features} )
A_ : str = self.pad(
snake_case , padding=snake_case , max_length=snake_case , truncation=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , **snake_case , )
# make sure list is in array format
A_ : Optional[int] = padded_inputs.get("input_features" )
if isinstance(input_features[0] , snake_case ):
A_ : Union[str, Any] = [np.asarray(snake_case , dtype=np.floataa ) for feature in input_features]
A_ : Dict = padded_inputs.get("attention_mask" )
if attention_mask is not None:
A_ : Any = [np.asarray(snake_case , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
A_ : Dict = (
np.array(snake_case , dtype=np.intaa )
if self._get_padding_strategies(snake_case , max_length=snake_case ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
A_ : Optional[int] = self.normalize(
padded_inputs["input_features"] , attention_mask=snake_case )
if return_tensors is not None:
A_ : Dict = padded_inputs.convert_to_tensors(snake_case )
return padded_inputs
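# A usage sketch for the extractor above. Its defaults match the deprecated
# MCTCTFeatureExtractor in upstream transformers, which is assumed here.
import numpy as np
from transformers import MCTCTFeatureExtractor  # assumed upstream class, see note above

extractor = MCTCTFeatureExtractor()                     # 80 mel bins, 16 kHz, 10 ms hop
waveform = np.random.randn(16_000).astype(np.float32)  # one second of mono audio
batch = extractor(waveform, sampling_rate=16_000, return_tensors="np")
print(batch["input_features"].shape)                   # (1, num_frames, 80)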
| 300
| 1
|
import functools
def __snake_case ( worda : str , wordb : str ) -> int:
    len_worda = len(worda )
    len_wordb = len(wordb )
    @functools.cache
    def min_distance(indexa : int , indexb : int ) -> int:
        # the first word is exhausted: every remaining letter of the second word costs one edit
        if indexa >= len_worda:
            return len_wordb - indexb
        # the second word is exhausted: every remaining letter of the first word costs one edit
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb] )  # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , indexb ) , 1 + min_distance(indexa , indexb + 1 ) , diff + min_distance(indexa + 1 , indexb + 1 ) , )
    return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
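# e.g. __snake_case("kitten", "sitting") == 3: substitute k->s and e->i, then
# insert the trailing g.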
| 300
|
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self :List[Any] , snake_case :int , snake_case :int , snake_case :Optional[int] = None , snake_case :int = 50_257 , snake_case :int = 1_024 , snake_case :int = 768 , snake_case :int = 12 , snake_case :int = 12 , snake_case :Optional[int] = None , snake_case :str = "gelu_new" , snake_case :float = 0.1 , snake_case :float = 0.1 , snake_case :float = 0.1 , snake_case :float = 1e-5 , snake_case :float = 0.02 , snake_case :bool = True , snake_case :bool = True , snake_case :bool = False , snake_case :bool = False , ):
'''simple docstring'''
super().__init__()
A_ : Tuple = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
f" `n_embd`: {n_embd} are not equal." )
A_ : List[Any] = prefix_inner_dim
A_ : Union[str, Any] = prefix_hidden_dim
A_ : List[str] = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
A_ : List[Any] = (
nn.Linear(self.prefix_hidden_dim , snake_case ) if self.prefix_hidden_dim is not None else nn.Identity()
)
A_ : List[Any] = GPTaConfig(
vocab_size=snake_case , n_positions=snake_case , n_embd=snake_case , n_layer=snake_case , n_head=snake_case , n_inner=snake_case , activation_function=snake_case , resid_pdrop=snake_case , embd_pdrop=snake_case , attn_pdrop=snake_case , layer_norm_epsilon=snake_case , initializer_range=snake_case , scale_attn_weights=snake_case , use_cache=snake_case , scale_attn_by_inverse_layer_idx=snake_case , reorder_and_upcast_attn=snake_case , )
A_ : Optional[Any] = GPTaLMHeadModel(snake_case )
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.Tensor , snake_case :torch.Tensor , snake_case :Optional[torch.Tensor] = None , snake_case :Optional[torch.Tensor] = None , ):
'''simple docstring'''
A_ : Any = self.transformer.transformer.wte(snake_case )
A_ : str = self.encode_prefix(snake_case )
A_ : Union[str, Any] = self.decode_prefix(snake_case )
A_ : int = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
A_ : Dict = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
A_ : int = torch.cat((dummy_token, input_ids) , dim=1 )
A_ : Union[str, Any] = self.transformer(inputs_embeds=snake_case , labels=snake_case , attention_mask=snake_case )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def SCREAMING_SNAKE_CASE ( self :str , snake_case :int , snake_case :torch.device ):
'''simple docstring'''
return torch.zeros(snake_case , self.prefix_length , dtype=torch.intaa , device=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :int ):
'''simple docstring'''
return self.encode_prefix(snake_case )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Dict , snake_case :Optional[int] , snake_case :Any ):
'''simple docstring'''
A_ : Any = torch.split(snake_case , 1 , dim=0 )
A_ : Optional[int] = []
A_ : Union[str, Any] = []
for feature in features:
A_ : Tuple = self.decode_prefix(feature.to(snake_case ) ) # back to the clip feature
# Only support beam search for now
A_ , A_ : Dict = self.generate_beam(
input_embeds=snake_case , device=snake_case , eos_token_id=snake_case )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
A_ : int = torch.stack(snake_case )
A_ : int = torch.stack(snake_case )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :int=None , snake_case :str=None , snake_case :int=None , snake_case :int = 5 , snake_case :int = 67 , snake_case :float = 1.0 , snake_case :Optional[int] = None , ):
'''simple docstring'''
A_ : Optional[Any] = eos_token_id
A_ : List[Any] = None
A_ : List[Any] = None
A_ : str = torch.ones(snake_case , device=snake_case , dtype=torch.int )
A_ : Any = torch.zeros(snake_case , device=snake_case , dtype=torch.bool )
if input_embeds is not None:
A_ : Any = input_embeds
else:
A_ : Optional[Any] = self.transformer.transformer.wte(snake_case )
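        # Beam search state: `scores` accumulates per-beam log-probabilities,
        # `seq_lengths` tracks lengths for length-normalised ranking, and
        # `is_stopped` marks beams that have already emitted the EOS token.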
for i in range(snake_case ):
A_ : Optional[Any] = self.transformer(inputs_embeds=snake_case )
A_ : str = outputs.logits
A_ : int = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
A_ : List[str] = logits.softmax(-1 ).log()
if scores is None:
A_ , A_ : Union[str, Any] = logits.topk(snake_case , -1 )
A_ : Tuple = generated.expand(snake_case , *generated.shape[1:] )
A_ , A_ : str = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
A_ : Union[str, Any] = next_tokens
else:
A_ : List[str] = tokens.expand(snake_case , *tokens.shape[1:] )
A_ : Union[str, Any] = torch.cat((tokens, next_tokens) , dim=1 )
else:
A_ : List[str] = -float(np.inf )
A_ : List[Any] = 0
A_ : Union[str, Any] = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
A_ : Optional[Any] = scores_sum / seq_lengths[:, None]
A_ , A_ : List[str] = scores_sum_average.view(-1 ).topk(snake_case , -1 )
A_ : str = next_tokens // scores_sum.shape[1]
A_ : Union[str, Any] = seq_lengths[next_tokens_source]
A_ : Optional[int] = next_tokens % scores_sum.shape[1]
A_ : Tuple = next_tokens.unsqueeze(1 )
A_ : Tuple = tokens[next_tokens_source]
A_ : Dict = torch.cat((tokens, next_tokens) , dim=1 )
A_ : Dict = generated[next_tokens_source]
A_ : Union[str, Any] = scores_sum_average * seq_lengths
A_ : Optional[int] = is_stopped[next_tokens_source]
A_ : Tuple = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
A_ : Union[str, Any] = torch.cat((generated, next_token_embed) , dim=1 )
A_ : Any = is_stopped + next_tokens.eq(snake_case ).squeeze()
if is_stopped.all():
break
A_ : int = scores / seq_lengths
A_ : str = scores.argsort(descending=snake_case )
# tokens tensors are already padded to max_seq_length
A_ : Dict = [tokens[i] for i in order]
A_ : int = torch.stack(snake_case , dim=0 )
A_ : List[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 300
| 1
|
import sys
import turtle
def get_mid ( pa : tuple[float, float] , pb : tuple[float, float] ) -> tuple[float, float]:
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle ( vertexa : tuple[float, float] , vertexb : tuple[float, float] , vertexc : tuple[float, float] , depth : int , ) -> None:
    my_pen.up()
    my_pen.goto(vertexa[0] , vertexa[1] )
    my_pen.down()
    my_pen.goto(vertexb[0] , vertexb[1] )
    my_pen.goto(vertexc[0] , vertexc[1] )
    my_pen.goto(vertexa[0] , vertexa[1] )
    if depth == 0:
        return
    triangle(vertexa , get_mid(vertexa , vertexb ) , get_mid(vertexa , vertexc ) , depth - 1 )
    triangle(vertexb , get_mid(vertexb , vertexa ) , get_mid(vertexb , vertexc ) , depth - 1 )
    triangle(vertexc , get_mid(vertexc , vertexa ) , get_mid(vertexc , vertexb ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
_lowerCAmelCase : Optional[int] = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
_lowerCAmelCase : Optional[int] = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
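# With depth d the recursion draws 3**d smallest triangles, e.g.
#   python fractals.py 4
# renders 81 of them inside the outer triangle.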
| 300
|
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self :Union[str, Any] , *snake_case :Tuple , **snake_case :Any ):
'''simple docstring'''
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , snake_case , )
super().__init__(*snake_case , **snake_case )
| 300
| 1
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
class __magic_name__ ( enum.Enum ):
"""simple docstring"""
__UpperCamelCase = 0
__UpperCamelCase = 1
@add_end_docstrings(lowerCamelCase__ )
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = '''generated'''
def __init__( self :Any , *snake_case :Any , **snake_case :List[str] ):
'''simple docstring'''
super().__init__(*snake_case , **snake_case )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def SCREAMING_SNAKE_CASE ( self :int , snake_case :Tuple=None , snake_case :List[str]=None , snake_case :Dict=None , snake_case :str=None , snake_case :List[Any]=None , snake_case :Dict=None , **snake_case :Optional[int] , ):
'''simple docstring'''
A_ : str = {}
if truncation is not None:
A_ : str = truncation
A_ : Optional[Any] = generate_kwargs
A_ : Tuple = {}
if return_tensors is not None and return_type is None:
A_ : Dict = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
A_ : List[Any] = return_type
if clean_up_tokenization_spaces is not None:
A_ : Any = clean_up_tokenization_spaces
if stop_sequence is not None:
A_ : Optional[Any] = self.tokenizer.encode(snake_case , add_special_tokens=snake_case )
if len(snake_case ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
A_ : Optional[int] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :int , snake_case :int , snake_case :int ):
'''simple docstring'''
return True
def SCREAMING_SNAKE_CASE ( self :str , *snake_case :Dict , snake_case :Optional[Any] ):
'''simple docstring'''
A_ : Any = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(args[0] , snake_case ):
if self.tokenizer.pad_token_id is None:
raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input" )
A_ : Tuple = ([prefix + arg for arg in args[0]],)
A_ : Any = True
elif isinstance(args[0] , snake_case ):
A_ : Optional[int] = (prefix + args[0],)
A_ : List[Any] = False
else:
raise ValueError(
f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`" )
A_ : int = self.tokenizer(*snake_case , padding=snake_case , truncation=snake_case , return_tensors=self.framework )
        # This is produced by tokenizers but is not a valid generate kwarg
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self :int , *snake_case :Dict , **snake_case :Any ):
'''simple docstring'''
A_ : Optional[int] = super().__call__(*snake_case , **snake_case )
if (
isinstance(args[0] , snake_case )
and all(isinstance(snake_case , snake_case ) for el in args[0] )
and all(len(snake_case ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :Optional[Any] , snake_case :Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , **snake_case :Optional[int] ):
'''simple docstring'''
A_ : Dict = self._parse_and_tokenize(snake_case , truncation=snake_case , **snake_case )
return inputs
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :Dict , **snake_case :Optional[int] ):
'''simple docstring'''
if self.framework == "pt":
A_ , A_ : List[Any] = model_inputs["input_ids"].shape
elif self.framework == "tf":
A_ , A_ : Any = tf.shape(model_inputs["input_ids"] ).numpy()
A_ : List[Any] = generate_kwargs.get("min_length" , self.model.config.min_length )
A_ : int = generate_kwargs.get("max_length" , self.model.config.max_length )
self.check_inputs(snake_case , generate_kwargs["min_length"] , generate_kwargs["max_length"] )
A_ : Any = self.model.generate(**snake_case , **snake_case )
A_ : str = output_ids.shape[0]
if self.framework == "pt":
A_ : Any = output_ids.reshape(snake_case , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
A_ : List[str] = tf.reshape(snake_case , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :Tuple , snake_case :Optional[int]=ReturnType.TEXT , snake_case :List[str]=False ):
'''simple docstring'''
A_ : Tuple = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
A_ : Union[str, Any] = {f"{self.return_name}_token_ids": output_ids}
elif return_type == ReturnType.TEXT:
A_ : Dict = {
f"{self.return_name}_text": self.tokenizer.decode(
snake_case , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case , )
}
records.append(snake_case )
return records
@add_end_docstrings(lowerCamelCase__ )
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = '''summary'''
def __call__( self :List[Any] , *snake_case :Optional[Any] , **snake_case :List[str] ):
'''simple docstring'''
return super().__call__(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :int , snake_case :int , snake_case :int ):
'''simple docstring'''
if max_length < min_length:
logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}." )
if input_length < max_length:
logger.warning(
f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
"a summarization task, where outputs shorter than the input are typically wanted, you might "
f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})" )
@add_end_docstrings(lowerCamelCase__ )
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = '''translation'''
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :int , snake_case :int , snake_case :int ):
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
"increasing your max_length manually, e.g. translator('...', max_length=400)" )
return True
def SCREAMING_SNAKE_CASE ( self :int , *snake_case :Optional[int] , snake_case :Tuple=TruncationStrategy.DO_NOT_TRUNCATE , snake_case :Optional[int]=None , snake_case :Dict=None ):
'''simple docstring'''
if getattr(self.tokenizer , "_build_translation_inputs" , snake_case ):
return self.tokenizer._build_translation_inputs(
*snake_case , return_tensors=self.framework , truncation=snake_case , src_lang=snake_case , tgt_lang=snake_case )
else:
return super()._parse_and_tokenize(*snake_case , truncation=snake_case )
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :Union[str, Any]=None , snake_case :List[str]=None , **snake_case :int ):
'''simple docstring'''
A_ , A_ , A_ : int = super()._sanitize_parameters(**snake_case )
if src_lang is not None:
A_ : List[Any] = src_lang
if tgt_lang is not None:
A_ : List[str] = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
A_ : Tuple = kwargs.get("task" , self.task )
A_ : int = task.split("_" )
if task and len(snake_case ) == 4:
# translation, XX, to YY
A_ : Optional[int] = items[1]
A_ : str = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self :Any , *snake_case :List[str] , **snake_case :int ):
'''simple docstring'''
return super().__call__(*snake_case , **snake_case )
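# A short usage sketch for the pipelines defined above; the checkpoints are
# standard Hub models and are downloaded on first use.
from transformers import pipeline

summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
print(summarizer("Very long article text ...", max_length=56))  # [{'summary_text': ...}]

translator = pipeline("translation_en_to_fr", model="t5-small")
print(translator("How old are you?"))                           # [{'translation_text': ...}]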
| 300
|
from __future__ import annotations
def __snake_case ( _lowerCAmelCase : list[float] ) -> bool:
if len(_lowerCAmelCase ) < 2:
raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
if any(i <= 0 for i in nums ):
raise ValueError("All values must be greater than 0" )
A_ : List[str] = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
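# Example calls for the check above (the longest side must be strictly shorter
# than the sum of the others):
#
#   check([6, 10, 5])     -> True   (10 < 6 + 5)
#   check([3, 7, 13, 2])  -> False  (13 >= 3 + 7 + 2)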
| 300
| 1
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
            )
        outputs = self(**inputs)
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        mode = "dev" if mode == "test" else mode
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
            )
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main()
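
# Usage sketch: one plausible invocation of this script. The script name, the
# data path and the flags inherited from `add_generic_args`/`generic_train`
# (e.g. --model_name_or_path, --output_dir) are illustrative assumptions.
#
#   python run_glue.py \
#       --model_name_or_path bert-base-cased \
#       --task mrpc \
#       --data_dir ./glue_data/MRPC \
#       --max_seq_length 128 \
#       --do_predict
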
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
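
# Usage sketch, assuming this file is registered under diffusers' community
# pipelines as "seed_resize_stable_diffusion"; the checkpoint name, image size
# and seed are illustrative.
#
# import torch
# from diffusers import DiffusionPipeline
#
# pipe = DiffusionPipeline.from_pretrained(
#     "runwayml/stable-diffusion-v1-5", custom_pipeline="seed_resize_stable_diffusion"
# ).to("cuda")
# generator = torch.Generator(device="cuda").manual_seed(0)
# image = pipe("a lone tree on a hill", height=512, width=768, generator=generator).images[0]
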
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Calculate the resonant frequency f = 1 / (2 * pi * sqrt(L * C)) of an LC circuit."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
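
# Worked example: L = 10 mH and C = 5 uF give
# f = 1 / (2 * pi * sqrt(L * C)) = 1 / (2 * pi * sqrt(5e-8)) ~= 711.76 Hz.
_label, _frequency = resonant_frequency(inductance=10e-3, capacitance=5e-6)
assert _label == "Resonant frequency"
assert round(_frequency, 2) == 711.76
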
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
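
# Shape sketch: `cosine_distance` maps (N, D) image embeddings and (K, D)
# concept embeddings to an (N, K) matrix of cosine similarities; an image is
# flagged when any per-concept score exceeds its learned threshold. The random
# tensors below are purely illustrative stand-ins for real CLIP embeddings.
#
# _image_embeds = torch.randn(4, 768)
# _concept_embeds = torch.randn(17, 768)
# assert cosine_distance(_image_embeds, _concept_embeds).shape == (4, 17)
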
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
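
# Usage sketch: round-tripping a dataset name and building a split prefix.
#
# camelcase_to_snakecase("MyDatasetName")              # -> "my_dataset_name"
# snakecase_to_camelcase("my_dataset_name")            # -> "MyDatasetName"
# filename_prefix_for_split("MyDatasetName", "train")  # -> "my_dataset_name-train"
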
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
    for i in range(encoder_config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias"))

    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
            ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
            ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
            ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
            ("encoder.deit.norm.weight", "encoder.layernorm.weight"),
            ("encoder.deit.norm.bias", "encoder.layernorm.bias"),
        ]
    )

    return rename_keys


def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im


@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]
    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
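
# Usage sketch: converting the base handwritten checkpoint. The script file
# name and the output directory below are illustrative assumptions.
#
#   python convert_trocr_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten
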
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index in `s` at which `pattern` starts, using the naive
    O(len(s) * len(pattern)) sliding comparison."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step function."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """A variance-exploding (VE) stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        """Predict the sample at the previous timestep by reversing the SDE."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """Correct the predicted sample based on the model_output of the network."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
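
# Usage sketch of the predictor-corrector sampling loop this scheduler supports
# (loosely mirroring diffusers' ScoreSdeVePipeline); `unet` stands in for a
# trained score model and the shapes/step counts are illustrative.
#
# scheduler = ScoreSdeVeScheduler()
# scheduler.set_timesteps(num_inference_steps=100)
# scheduler.set_sigmas(num_inference_steps=100)
# sample = torch.randn(1, 3, 256, 256) * scheduler.init_noise_sigma
# for t in scheduler.timesteps:
#     for _ in range(scheduler.config.correct_steps):
#         model_output = unet(sample, t).sample
#         sample = scheduler.step_correct(model_output, sample).prev_sample
#     model_output = unet(sample, t).sample
#     sample = scheduler.step_pred(model_output, t, sample).prev_sample
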
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    """Configuration class to store the configuration of a GPTNeoX model."""

    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
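
# Usage sketch: the defaults above correspond to the 20B checkpoint; a
# deliberately tiny configuration (illustrative sizes) is built the same way.
#
# tiny_config = GPTNeoXConfig(
#     vocab_size=1024, hidden_size=128, num_hidden_layers=2, num_attention_heads=4, intermediate_size=512
# )
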
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of `func` (given as a string in the variable x) near the
    starting guess `a` via the Newton-Raphson method."""
    x = a
    while True:
        x = Decimal(x) - (Decimal(eval(func)) / Decimal(eval(str(diff(func)))))  # noqa: S307
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find root of logarithmic function (the root of log(x) - 1 = 0 is e)
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential Roots
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
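
# Worked check: x**2 - 5*x + 2 = 0 has roots (5 +/- sqrt(17)) / 2; starting
# from 0.4 the iteration converges to the smaller root, roughly 0.43845.
assert abs(newton_raphson("x**2 - 5*x + 2", 0.4) - (5 - sqrt(17)) / 2) < 1e-6
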
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch GitHub info of the authenticated user using the requests module."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=["https://github.com/jhclark/tercom"],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}